linux/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifdef CONFIG_RFS_ACCEL

#include <linux/hash.h>
#include <linux/mlx5/fs.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include "en.h"

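/* The key of an aRFS rule: ethertype and IP protocol select the table;
 * the addresses and L4 ports identify the individual flow within it.
 */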
struct arfs_tuple {
        __be16 etype;
        u8     ip_proto;
        union {
                __be32 src_ipv4;
                struct in6_addr src_ipv6;
        };
        union {
                __be32 dst_ipv4;
                struct in6_addr dst_ipv6;
        };
        __be16 src_port;
        __be16 dst_port;
};

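/* One tracked flow: the tuple it matches, the RQ it is steered to, and the
 * hardware rule (once installed). arfs_work defers the firmware commands
 * out of the atomic context of ndo_rx_flow_steer.
 */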
struct arfs_rule {
        struct mlx5e_priv       *priv;
        struct work_struct      arfs_work;
        struct mlx5_flow_handle *rule;
        struct hlist_node       hlist;
        int                     rxq;
        /* Flow ID passed to ndo_rx_flow_steer */
        int                     flow_id;
        /* Filter ID returned by ndo_rx_flow_steer */
        int                     filter_id;
        struct arfs_tuple       tuple;
};

#define mlx5e_for_each_arfs_rule(hn, tmp, arfs_tables, i, j) \
        for (i = 0; i < ARFS_NUM_TYPES; i++) \
                mlx5e_for_each_hash_arfs_rule(hn, tmp, arfs_tables[i].rules_hash, j)

#define mlx5e_for_each_hash_arfs_rule(hn, tmp, hash, j) \
        for (j = 0; j < ARFS_HASH_SIZE; j++) \
                hlist_for_each_entry_safe(hn, tmp, &hash[j], hlist)

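/* Map an aRFS table type to the corresponding traffic type of the TTC
 * table, or -EINVAL for an unknown type.
 */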
static enum mlx5e_traffic_types arfs_get_tt(enum arfs_type type)
{
        switch (type) {
        case ARFS_IPV4_TCP:
                return MLX5E_TT_IPV4_TCP;
        case ARFS_IPV4_UDP:
                return MLX5E_TT_IPV4_UDP;
        case ARFS_IPV6_TCP:
                return MLX5E_TT_IPV6_TCP;
        case ARFS_IPV6_UDP:
                return MLX5E_TT_IPV6_UDP;
        default:
                return -EINVAL;
        }
}

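/* Steer traffic back to the RSS indirection TIRs, bypassing the aRFS
 * tables.
 */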
static int arfs_disable(struct mlx5e_priv *priv)
{
        struct mlx5_flow_destination dest;
        struct mlx5e_tir *tir = priv->indir_tir;
        int err = 0;
        int tt;
        int i;

        dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
        for (i = 0; i < ARFS_NUM_TYPES; i++) {
                tt = arfs_get_tt(i);
                /* The indirect TIRs are indexed by traffic type, not by
                 * aRFS table type.
                 */
                dest.tir_num = tir[tt].tirn;
                /* Modify ttc rules destination to bypass the aRFS tables */
                err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
                                                   &dest, NULL);
                if (err) {
                        netdev_err(priv->netdev,
                                   "%s: modify ttc destination failed\n",
                                   __func__);
                        return err;
                }
        }
        return 0;
}

static void arfs_del_rules(struct mlx5e_priv *priv);

int mlx5e_arfs_disable(struct mlx5e_priv *priv)
{
        arfs_del_rules(priv);

        return arfs_disable(priv);
}

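/* Redirect the TTC rules of the four supported traffic types into the
 * aRFS flow tables; on failure, restore the default destinations.
 */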
int mlx5e_arfs_enable(struct mlx5e_priv *priv)
{
        struct mlx5_flow_destination dest;
        int err = 0;
        int tt;
        int i;

        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        for (i = 0; i < ARFS_NUM_TYPES; i++) {
                dest.ft = priv->fs.arfs.arfs_tables[i].ft.t;
                tt = arfs_get_tt(i);
                /* Modify ttc rules destination to point to the aRFS FTs */
                err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
                                                   &dest, NULL);
                if (err) {
                        netdev_err(priv->netdev,
                                   "%s: modify ttc destination failed err=%d\n",
                                   __func__, err);
                        arfs_disable(priv);
                        return err;
                }
        }
        return 0;
}

static void arfs_destroy_table(struct arfs_table *arfs_t)
{
        mlx5_del_flow_rules(arfs_t->default_rule);
        mlx5e_destroy_flow_table(&arfs_t->ft);
}

void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
{
        int i;

        if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
                return;

        arfs_del_rules(priv);
        destroy_workqueue(priv->fs.arfs.wq);
        for (i = 0; i < ARFS_NUM_TYPES; i++) {
                if (!IS_ERR_OR_NULL(priv->fs.arfs.arfs_tables[i].ft.t))
                        arfs_destroy_table(&priv->fs.arfs.arfs_tables[i]);
        }
}

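/* Add the catch-all rule of an aRFS table: an empty match spec (placed in
 * the table's last, wildcard group) that forwards any unsteered traffic to
 * the RSS TIR of the matching traffic type.
 */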
static int arfs_add_default_rule(struct mlx5e_priv *priv,
                                 enum arfs_type type)
{
        struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
        struct mlx5e_tir *tir = priv->indir_tir;
        struct mlx5_flow_destination dest;
        MLX5_DECLARE_FLOW_ACT(flow_act);
        struct mlx5_flow_spec *spec;
        enum mlx5e_traffic_types tt;
        int err = 0;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec) {
                err = -ENOMEM;
                goto out;
        }

        dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
        tt = arfs_get_tt(type);
        if (tt == -EINVAL) {
                netdev_err(priv->netdev, "%s: bad arfs_type: %d\n",
                           __func__, type);
                err = -EINVAL;
                goto out;
        }

        dest.tir_num = tir[tt].tirn;

        arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, spec,
                                                   &flow_act,
                                                   &dest, 1);
        if (IS_ERR(arfs_t->default_rule)) {
                err = PTR_ERR(arfs_t->default_rule);
                arfs_t->default_rule = NULL;
                netdev_err(priv->netdev, "%s: add rule failed, arfs type=%d\n",
                           __func__, type);
        }
out:
        kvfree(spec);
        return err;
}

#define MLX5E_ARFS_NUM_GROUPS   2
#define MLX5E_ARFS_GROUP1_SIZE  BIT(12)
#define MLX5E_ARFS_GROUP2_SIZE  BIT(0)
#define MLX5E_ARFS_TABLE_SIZE   (MLX5E_ARFS_GROUP1_SIZE +\
                                 MLX5E_ARFS_GROUP2_SIZE)
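/* Two flow groups per table: group 1 matches on ethertype, IP addresses
 * and L4 ports for the per-flow rules; group 2 is a single wildcard entry
 * for the default rule.
 */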
static int arfs_create_groups(struct mlx5e_flow_table *ft,
                              enum arfs_type type)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        void *outer_headers_c;
        int ix = 0;
        u32 *in;
        int err;
        u8 *mc;

        ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
                        sizeof(*ft->g), GFP_KERNEL);
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in || !ft->g) {
                kvfree(ft->g);
                kvfree(in);
                return -ENOMEM;
        }

        mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
        outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc,
                                       outer_headers);
        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ethertype);
        switch (type) {
        case ARFS_IPV4_TCP:
        case ARFS_IPV6_TCP:
                MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport);
                MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport);
                break;
        case ARFS_IPV4_UDP:
        case ARFS_IPV6_UDP:
                MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
                MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_sport);
                break;
        default:
                err = -EINVAL;
                goto out;
        }

        switch (type) {
        case ARFS_IPV4_TCP:
        case ARFS_IPV4_UDP:
                MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
                                 src_ipv4_src_ipv6.ipv4_layout.ipv4);
                MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
                                 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
                break;
        case ARFS_IPV6_TCP:
        case ARFS_IPV6_UDP:
                memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       0xff, 16);
                memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       0xff, 16);
                break;
        default:
                err = -EINVAL;
                goto out;
        }

        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_ARFS_GROUP1_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err;
        ft->num_groups++;

        memset(in, 0, inlen);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_ARFS_GROUP2_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err;
        ft->num_groups++;

        kvfree(in);
        return 0;

err:
        err = PTR_ERR(ft->g[ft->num_groups]);
        ft->g[ft->num_groups] = NULL;
out:
        kvfree(in);

        return err;
}

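/* Create one aRFS flow table along with its groups and default rule. */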
static int arfs_create_table(struct mlx5e_priv *priv,
                             enum arfs_type type)
{
        struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
        struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
        struct mlx5_flow_table_attr ft_attr = {};
        int err;

        ft->num_groups = 0;

        ft_attr.max_fte = MLX5E_ARFS_TABLE_SIZE;
        ft_attr.level = MLX5E_ARFS_FT_LEVEL;
        ft_attr.prio = MLX5E_NIC_PRIO;

        ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
        if (IS_ERR(ft->t)) {
                err = PTR_ERR(ft->t);
                ft->t = NULL;
                return err;
        }

        err = arfs_create_groups(ft, type);
        if (err)
                goto err;

        err = arfs_add_default_rule(priv, type);
        if (err)
                goto err;

        return 0;
err:
        mlx5e_destroy_flow_table(ft);
        return err;
}

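/* Create the four aRFS tables (IPv4/IPv6 x TCP/UDP) and the workqueue that
 * installs steering rules; a no-op when the netdev lacks NETIF_F_NTUPLE.
 */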
int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
{
        int err = 0;
        int i;

        if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
                return 0;

        spin_lock_init(&priv->fs.arfs.arfs_lock);
        INIT_LIST_HEAD(&priv->fs.arfs.rules);
        priv->fs.arfs.wq = create_singlethread_workqueue("mlx5e_arfs");
        if (!priv->fs.arfs.wq)
                return -ENOMEM;

        for (i = 0; i < ARFS_NUM_TYPES; i++) {
                err = arfs_create_table(priv, i);
                if (err)
                        goto err;
        }
        return 0;
err:
        mlx5e_arfs_destroy_tables(priv);
        return err;
}

#define MLX5E_ARFS_EXPIRY_QUOTA 60

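/* Scan up to MLX5E_ARFS_EXPIRY_QUOTA rules and delete those that the RPS
 * core no longer considers active. Expired rules are first moved to a
 * private list under arfs_lock, then their hardware rules are removed
 * outside of it.
 */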
static void arfs_may_expire_flow(struct mlx5e_priv *priv)
{
        struct arfs_rule *arfs_rule;
        struct hlist_node *htmp;
        int quota = 0;
        int i;
        int j;

        HLIST_HEAD(del_list);
        spin_lock_bh(&priv->fs.arfs.arfs_lock);
        mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
                if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
                        break;
                if (!work_pending(&arfs_rule->arfs_work) &&
                    rps_may_expire_flow(priv->netdev,
                                        arfs_rule->rxq, arfs_rule->flow_id,
                                        arfs_rule->filter_id)) {
                        hlist_del_init(&arfs_rule->hlist);
                        hlist_add_head(&arfs_rule->hlist, &del_list);
                }
        }
        spin_unlock_bh(&priv->fs.arfs.arfs_lock);
        hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
                if (arfs_rule->rule)
                        mlx5_del_flow_rules(arfs_rule->rule);
                hlist_del(&arfs_rule->hlist);
                kfree(arfs_rule);
        }
}

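/* Remove all aRFS rules: unhash everything under arfs_lock, then cancel
 * the pending work and delete the hardware rules outside the lock.
 */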
static void arfs_del_rules(struct mlx5e_priv *priv)
{
        struct hlist_node *htmp;
        struct arfs_rule *rule;
        int i;
        int j;

        HLIST_HEAD(del_list);
        spin_lock_bh(&priv->fs.arfs.arfs_lock);
        mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
                hlist_del_init(&rule->hlist);
                hlist_add_head(&rule->hlist, &del_list);
        }
        spin_unlock_bh(&priv->fs.arfs.arfs_lock);

        hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
                cancel_work_sync(&rule->arfs_work);
                if (rule->rule)
                        mlx5_del_flow_rules(rule->rule);
                hlist_del(&rule->hlist);
                kfree(rule);
        }
}

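/* Pick a hash bucket from the L4 source and destination ports only; the
 * IP addresses are compared at lookup time by arfs_find_rule().
 */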
static struct hlist_head *
arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
                 __be16 dst_port)
{
        unsigned long l;
        int bucket_idx;

        l = (__force unsigned long)src_port |
            ((__force unsigned long)dst_port << 2);

        bucket_idx = hash_long(l, ARFS_HASH_SHIFT);

        return &arfs_t->rules_hash[bucket_idx];
}

static u8 arfs_get_ip_proto(const struct sk_buff *skb)
{
        return (skb->protocol == htons(ETH_P_IP)) ?
                ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
                                         u8 ip_proto, __be16 etype)
{
        if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_TCP)
                return &arfs->arfs_tables[ARFS_IPV4_TCP];
        if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_UDP)
                return &arfs->arfs_tables[ARFS_IPV4_UDP];
        if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_TCP)
                return &arfs->arfs_tables[ARFS_IPV6_TCP];
        if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_UDP)
                return &arfs->arfs_tables[ARFS_IPV6_UDP];

        return NULL;
}

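/* Translate a rule's tuple into an mlx5 match spec and install a flow rule
 * steering the flow to the direct TIR of its target RQ.
 */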
static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
                                              struct arfs_rule *arfs_rule)
{
        struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
        struct arfs_tuple *tuple = &arfs_rule->tuple;
        struct mlx5_flow_handle *rule = NULL;
        struct mlx5_flow_destination dest;
        MLX5_DECLARE_FLOW_ACT(flow_act);
        struct arfs_table *arfs_table;
        struct mlx5_flow_spec *spec;
        struct mlx5_flow_table *ft;
        int err = 0;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec) {
                err = -ENOMEM;
                goto out;
        }
        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                         outer_headers.ethertype);
        MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype,
                 ntohs(tuple->etype));
        arfs_table = arfs_get_table(arfs, tuple->ip_proto, tuple->etype);
        if (!arfs_table) {
                err = -EINVAL;
                goto out;
        }

        ft = arfs_table->ft.t;
        if (tuple->ip_proto == IPPROTO_TCP) {
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                                 outer_headers.tcp_dport);
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                                 outer_headers.tcp_sport);
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_dport,
                         ntohs(tuple->dst_port));
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_sport,
                         ntohs(tuple->src_port));
        } else {
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                                 outer_headers.udp_dport);
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                                 outer_headers.udp_sport);
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport,
                         ntohs(tuple->dst_port));
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_sport,
                         ntohs(tuple->src_port));
        }
        if (tuple->etype == htons(ETH_P_IP)) {
                memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       &tuple->src_ipv4,
                       4);
                memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       &tuple->dst_ipv4,
                       4);
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                                 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                                 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
        } else {
                memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &tuple->src_ipv6,
                       16);
                memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &tuple->dst_ipv6,
                       16);
                memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       0xff,
                       16);
                memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       0xff,
                       16);
        }
        dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
        dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn;
        rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                netdev_err(priv->netdev, "%s: add rule(filter id=%d, rq idx=%d) failed, err=%d\n",
                           __func__, arfs_rule->filter_id, arfs_rule->rxq, err);
        }

out:
        kvfree(spec);
        return err ? ERR_PTR(err) : rule;
}

static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
                                struct mlx5_flow_handle *rule, u16 rxq)
{
        struct mlx5_flow_destination dst;
        int err = 0;

        dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
        dst.tir_num = priv->direct_tir[rxq].tirn;
        err = mlx5_modify_rule_destination(rule, &dst, NULL);
        if (err)
                netdev_warn(priv->netdev,
                            "Failed to modify aRFS rule destination to rq=%d\n", rxq);
}

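/* Work handler for a rule: drop it if the netdev is down, install the flow
 * rule on first use, or retarget an existing rule to the new RQ; finally,
 * reap expired rules.
 */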
static void arfs_handle_work(struct work_struct *work)
{
        struct arfs_rule *arfs_rule = container_of(work,
                                                   struct arfs_rule,
                                                   arfs_work);
        struct mlx5e_priv *priv = arfs_rule->priv;
        struct mlx5_flow_handle *rule;

        mutex_lock(&priv->state_lock);
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                spin_lock_bh(&priv->fs.arfs.arfs_lock);
                hlist_del(&arfs_rule->hlist);
                spin_unlock_bh(&priv->fs.arfs.arfs_lock);

                mutex_unlock(&priv->state_lock);
                kfree(arfs_rule);
                goto out;
        }
        mutex_unlock(&priv->state_lock);

        if (!arfs_rule->rule) {
                rule = arfs_add_rule(priv, arfs_rule);
                if (IS_ERR(rule))
                        goto out;
                arfs_rule->rule = rule;
        } else {
                arfs_modify_rule_rq(priv, arfs_rule->rule,
                                    arfs_rule->rxq);
        }
out:
        arfs_may_expire_flow(priv);
}

/* return L4 destination port from ip4/6 packets */
static __be16 arfs_get_dst_port(const struct sk_buff *skb)
{
        char *transport_header;

        transport_header = skb_transport_header(skb);
        if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
                return ((struct tcphdr *)transport_header)->dest;
        return ((struct udphdr *)transport_header)->dest;
}

/* return L4 source port from ip4/6 packets */
static __be16 arfs_get_src_port(const struct sk_buff *skb)
{
        char *transport_header;

        transport_header = skb_transport_header(skb);
        if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
                return ((struct tcphdr *)transport_header)->source;
        return ((struct udphdr *)transport_header)->source;
}

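/* Allocate and hash a new rule for the flow carried by @skb. Runs in
 * atomic context under arfs_lock, hence GFP_ATOMIC.
 */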
static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
                                         struct arfs_table *arfs_t,
                                         const struct sk_buff *skb,
                                         u16 rxq, u32 flow_id)
{
        struct arfs_rule *rule;
        struct arfs_tuple *tuple;

        rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
        if (!rule)
                return NULL;

        rule->priv = priv;
        rule->rxq = rxq;
        INIT_WORK(&rule->arfs_work, arfs_handle_work);

        tuple = &rule->tuple;
        tuple->etype = skb->protocol;
        if (tuple->etype == htons(ETH_P_IP)) {
                tuple->src_ipv4 = ip_hdr(skb)->saddr;
                tuple->dst_ipv4 = ip_hdr(skb)->daddr;
        } else {
                memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
                       sizeof(struct in6_addr));
                memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
                       sizeof(struct in6_addr));
        }
        tuple->ip_proto = arfs_get_ip_proto(skb);
        tuple->src_port = arfs_get_src_port(skb);
        tuple->dst_port = arfs_get_dst_port(skb);

        rule->flow_id = flow_id;
        rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;

        hlist_add_head(&rule->hlist,
                       arfs_hash_bucket(arfs_t, tuple->src_port,
                                        tuple->dst_port));
        return rule;
}

static bool arfs_cmp_ips(struct arfs_tuple *tuple,
                         const struct sk_buff *skb)
{
        if (tuple->etype == htons(ETH_P_IP) &&
            tuple->src_ipv4 == ip_hdr(skb)->saddr &&
            tuple->dst_ipv4 == ip_hdr(skb)->daddr)
                return true;
        if (tuple->etype == htons(ETH_P_IPV6) &&
            (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
                     sizeof(struct in6_addr))) &&
            (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
                     sizeof(struct in6_addr))))
                return true;
        return false;
}

static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
                                        const struct sk_buff *skb)
{
        struct arfs_rule *arfs_rule;
        struct hlist_head *head;
        __be16 src_port = arfs_get_src_port(skb);
        __be16 dst_port = arfs_get_dst_port(skb);

        head = arfs_hash_bucket(arfs_t, src_port, dst_port);
        hlist_for_each_entry(arfs_rule, head, hlist) {
                if (arfs_rule->tuple.src_port == src_port &&
                    arfs_rule->tuple.dst_port == dst_port &&
                    arfs_cmp_ips(&arfs_rule->tuple, skb)) {
                        return arfs_rule;
                }
        }

        return NULL;
}

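/* The ndo_rx_flow_steer() entry point, called by the RFS core in atomic
 * context when it wants the flow of @skb steered to @rxq_index. Returns
 * the filter ID later passed to rps_may_expire_flow(), or a negative
 * errno. The hardware programming itself is deferred to the aRFS
 * workqueue.
 */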
int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
                        u16 rxq_index, u32 flow_id)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
        struct arfs_table *arfs_t;
        struct arfs_rule *arfs_rule;

        if (skb->protocol != htons(ETH_P_IP) &&
            skb->protocol != htons(ETH_P_IPV6))
                return -EPROTONOSUPPORT;

        arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
        if (!arfs_t)
                return -EPROTONOSUPPORT;

        spin_lock_bh(&arfs->arfs_lock);
        arfs_rule = arfs_find_rule(arfs_t, skb);
        if (arfs_rule) {
                if (arfs_rule->rxq == rxq_index) {
                        spin_unlock_bh(&arfs->arfs_lock);
                        return arfs_rule->filter_id;
                }
                arfs_rule->rxq = rxq_index;
        } else {
                arfs_rule = arfs_alloc_rule(priv, arfs_t, skb,
                                            rxq_index, flow_id);
                if (!arfs_rule) {
                        spin_unlock_bh(&arfs->arfs_lock);
                        return -ENOMEM;
                }
        }
        queue_work(priv->fs.arfs.wq, &arfs_rule->arfs_work);
        spin_unlock_bh(&arfs->arfs_lock);
        return arfs_rule->filter_id;
}
#endif