linux/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/fs.h>
#include "en.h"

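/*
 * One ethtool steering entry: the ethtool flow spec it was created from,
 * the mlx5 flow handle installed in the device and the ethtool flow table
 * that holds it.
 */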
struct mlx5e_ethtool_rule {
        struct list_head             list;
        struct ethtool_rx_flow_spec  flow_spec;
        struct mlx5_flow_handle      *rule;
        struct mlx5e_ethtool_table   *eth_ft;
};

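/* Drop one rule reference; destroy the flow table once its last rule is gone. */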
static void put_flow_table(struct mlx5e_ethtool_table *eth_ft)
{
        if (!--eth_ft->num_rules) {
                mlx5_destroy_flow_table(eth_ft->ft);
                eth_ft->ft = NULL;
        }
}

#define MLX5E_ETHTOOL_L3_L4_PRIO 0
#define MLX5E_ETHTOOL_L2_PRIO (MLX5E_ETHTOOL_L3_L4_PRIO + ETHTOOL_NUM_L3_L4_FTS)
#define MLX5E_ETHTOOL_NUM_ENTRIES 64000
#define MLX5E_ETHTOOL_NUM_GROUPS  10
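/*
 * Look up (and lazily create) the ethtool flow table that serves this flow
 * type. Rules with more match tuples are placed at lower priority numbers,
 * so more specific rules are matched before less specific ones.
 */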
static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
                                                  struct ethtool_rx_flow_spec *fs,
                                                  int num_tuples)
{
        struct mlx5e_ethtool_table *eth_ft;
        struct mlx5_flow_namespace *ns;
        struct mlx5_flow_table *ft;
        int max_tuples;
        int table_size;
        int prio;

        switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
        case TCP_V4_FLOW:
        case UDP_V4_FLOW:
                max_tuples = ETHTOOL_NUM_L3_L4_FTS;
                prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
                eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
                break;
        case IP_USER_FLOW:
                max_tuples = ETHTOOL_NUM_L3_L4_FTS;
                prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
                eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
                break;
        case ETHER_FLOW:
                max_tuples = ETHTOOL_NUM_L2_FTS;
                prio = max_tuples - num_tuples;
                eth_ft = &priv->fs.ethtool.l2_ft[prio];
                prio += MLX5E_ETHTOOL_L2_PRIO;
                break;
        default:
                return ERR_PTR(-EINVAL);
        }

        eth_ft->num_rules++;
        if (eth_ft->ft)
                return eth_ft;

        ns = mlx5_get_flow_namespace(priv->mdev,
                                     MLX5_FLOW_NAMESPACE_ETHTOOL);
        if (!ns)
                return ERR_PTR(-EOPNOTSUPP);

        table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
                                                       flow_table_properties_nic_receive.log_max_ft_size)),
                           MLX5E_ETHTOOL_NUM_ENTRIES);
        ft = mlx5_create_auto_grouped_flow_table(ns, prio,
                                                 table_size,
                                                 MLX5E_ETHTOOL_NUM_GROUPS, 0, 0);
        if (IS_ERR(ft))
                return (void *)ft;

        eth_ft->ft = ft;
        return eth_ft;
}

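/* Apply the mask to the value in place: val[i] &= mask[i] for every byte. */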
static void mask_spec(u8 *mask, u8 *val, size_t size)
{
        unsigned int i;

        for (i = 0; i < size; i++, mask++, val++)
                *val = *mask & *val;
}

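/*
 * Set the IPv4 source/destination match value and criteria in the outer
 * headers. validate_flow() has already ensured that any non-zero address
 * mask is all-ones, so the criteria bytes can simply be set to 0xff.
 */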
static void set_ips(void *outer_headers_v, void *outer_headers_c, __be32 ip4src_m,
                    __be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v)
{
        if (ip4src_m) {
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
                                    src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       &ip4src_v, sizeof(ip4src_v));
                memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
                                    src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       0xff, sizeof(ip4src_m));
        }
        if (ip4dst_m) {
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
                                    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       &ip4dst_v, sizeof(ip4dst_v));
                memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
                                    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       0xff, sizeof(ip4dst_m));
        }
        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
                 ethertype, ETH_P_IP);
        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
                 ethertype, 0xffff);
}

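/* Translate an ethtool_rx_flow_spec into mlx5 match criteria and match value. */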
static int set_flow_attrs(u32 *match_c, u32 *match_v,
                          struct ethtool_rx_flow_spec *fs)
{
        void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
                                             outer_headers);
        void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
                                             outer_headers);
        u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
        struct ethtool_tcpip4_spec *l4_mask;
        struct ethtool_tcpip4_spec *l4_val;
        struct ethtool_usrip4_spec *l3_mask;
        struct ethtool_usrip4_spec *l3_val;
        struct ethhdr *eth_val;
        struct ethhdr *eth_mask;

        switch (flow_type) {
        case TCP_V4_FLOW:
                l4_mask = &fs->m_u.tcp_ip4_spec;
                l4_val = &fs->h_u.tcp_ip4_spec;
                set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src,
                        l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst);

                if (l4_mask->psrc) {
                        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport,
                                 0xffff);
                        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport,
                                 ntohs(l4_val->psrc));
                }
                if (l4_mask->pdst) {
                        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport,
                                 0xffff);
                        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport,
                                 ntohs(l4_val->pdst));
                }
                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
                         0xffff);
                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
                         IPPROTO_TCP);
                break;
        case UDP_V4_FLOW:
                l4_mask = &fs->m_u.tcp_ip4_spec;
                l4_val = &fs->h_u.tcp_ip4_spec;
                set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src,
                        l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst);

                if (l4_mask->psrc) {
                        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport,
                                 0xffff);
                        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport,
                                 ntohs(l4_val->psrc));
                }
                if (l4_mask->pdst) {
                        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport,
                                 0xffff);
                        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport,
                                 ntohs(l4_val->pdst));
                }
                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
                         0xffff);
                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
                         IPPROTO_UDP);
                break;
        case IP_USER_FLOW:
                l3_mask = &fs->m_u.usr_ip4_spec;
                l3_val = &fs->h_u.usr_ip4_spec;
                set_ips(outer_headers_v, outer_headers_c, l3_mask->ip4src,
                        l3_val->ip4src, l3_mask->ip4dst, l3_val->ip4dst);
                break;
        case ETHER_FLOW:
                eth_mask = &fs->m_u.ether_spec;
                eth_val = &fs->h_u.ether_spec;

                mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask));
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
                                             outer_headers_c, smac_47_16),
                                eth_mask->h_source);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
                                             outer_headers_v, smac_47_16),
                                eth_val->h_source);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
                                             outer_headers_c, dmac_47_16),
                                eth_mask->h_dest);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
                                             outer_headers_v, dmac_47_16),
                                eth_val->h_dest);
                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ethertype,
                         ntohs(eth_mask->h_proto));
                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ethertype,
                         ntohs(eth_val->h_proto));
                break;
        default:
                return -EINVAL;
        }

        if ((fs->flow_type & FLOW_EXT) &&
            (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
                         cvlan_tag, 1);
                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
                         cvlan_tag, 1);
                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
                         first_vid, 0xfff);
                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
                         first_vid, ntohs(fs->h_ext.vlan_tci));
        }
        if (fs->flow_type & FLOW_MAC_EXT &&
            !is_zero_ether_addr(fs->m_ext.h_dest)) {
                mask_spec(fs->m_ext.h_dest, fs->h_ext.h_dest, ETH_ALEN);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
                                             outer_headers_c, dmac_47_16),
                                fs->m_ext.h_dest);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
                                             outer_headers_v, dmac_47_16),
                                fs->h_ext.h_dest);
        }

        return 0;
}

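/* Keep the rule list sorted by ethtool location when inserting a new rule. */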
static void add_rule_to_list(struct mlx5e_priv *priv,
                             struct mlx5e_ethtool_rule *rule)
{
        struct mlx5e_ethtool_rule *iter;
        struct list_head *head = &priv->fs.ethtool.rules;

        list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
                if (iter->flow_spec.location > rule->flow_spec.location)
                        break;
                head = &iter->list;
        }
        priv->fs.ethtool.tot_num_rules++;
        list_add(&rule->list, head);
}

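/* Return true if no bit is set in the outer-header match criteria. */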
static bool outer_header_zero(u32 *match_criteria)
{
        int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers);
        char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
                                             outer_headers);

        return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
                                                  outer_headers_c + 1,
                                                  size - 1);
}

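/*
 * Build the mlx5 flow spec for @fs and add it to @ft: drop the packet for
 * RX_CLS_FLOW_DISC, otherwise forward it to the TIR of the requested ring.
 */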
static struct mlx5_flow_handle *
add_ethtool_flow_rule(struct mlx5e_priv *priv,
                      struct mlx5_flow_table *ft,
                      struct ethtool_rx_flow_spec *fs)
{
        struct mlx5_flow_destination *dst = NULL;
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_spec *spec;
        struct mlx5_flow_handle *rule;
        int err = 0;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec)
                return ERR_PTR(-ENOMEM);
        err = set_flow_attrs(spec->match_criteria, spec->match_value,
                             fs);
        if (err)
                goto free;

        if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
                flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
        } else {
                dst = kzalloc(sizeof(*dst), GFP_KERNEL);
                if (!dst) {
                        err = -ENOMEM;
                        goto free;
                }

                dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
                dst->tir_num = priv->direct_tir[fs->ring_cookie].tirn;
                flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        }

        spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
        flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
        rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, dst ? 1 : 0);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
                           __func__, err);
                goto free;
        }
free:
        kvfree(spec);
        kfree(dst);
        return err ? ERR_PTR(err) : rule;
}

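/* Remove the rule from the device (if installed), unlink it and release its table. */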
static void del_ethtool_rule(struct mlx5e_priv *priv,
                             struct mlx5e_ethtool_rule *eth_rule)
{
        if (eth_rule->rule)
                mlx5_del_flow_rules(eth_rule->rule);
        list_del(&eth_rule->list);
        priv->fs.ethtool.tot_num_rules--;
        put_flow_table(eth_rule->eth_ft);
        kfree(eth_rule);
}

static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv,
                                                    int location)
{
        struct mlx5e_ethtool_rule *iter;

        list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
                if (iter->flow_spec.location == location)
                        return iter;
        }
        return NULL;
}

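/*
 * Allocate a rule entry for @location, deleting any rule that already
 * occupies that location, and link it into the sorted rule list.
 */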
static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv,
                                                   int location)
{
        struct mlx5e_ethtool_rule *eth_rule;

        eth_rule = find_ethtool_rule(priv, location);
        if (eth_rule)
                del_ethtool_rule(priv, eth_rule);

        eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL);
        if (!eth_rule)
                return ERR_PTR(-ENOMEM);

        add_rule_to_list(priv, eth_rule);
        return eth_rule;
}

#define MAX_NUM_OF_ETHTOOL_RULES BIT(10)

#define all_ones(field) (field == (__force typeof(field))-1)
#define all_zeros_or_all_ones(field)            \
        ((field) == 0 || (field) == (__force typeof(field))-1)

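/*
 * Sanity-check @fs and return the number of match tuples it uses (which
 * selects the flow table priority), or -EINVAL if it cannot be offloaded.
 */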
static int validate_flow(struct mlx5e_priv *priv,
                         struct ethtool_rx_flow_spec *fs)
{
        struct ethtool_tcpip4_spec *l4_mask;
        struct ethtool_usrip4_spec *l3_mask;
        struct ethhdr *eth_mask;
        int num_tuples = 0;

        if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
                return -EINVAL;

        if (fs->ring_cookie >= priv->channels.params.num_channels &&
            fs->ring_cookie != RX_CLS_FLOW_DISC)
                return -EINVAL;

        switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
        case ETHER_FLOW:
                eth_mask = &fs->m_u.ether_spec;
                if (!is_zero_ether_addr(eth_mask->h_dest))
                        num_tuples++;
                if (!is_zero_ether_addr(eth_mask->h_source))
                        num_tuples++;
                if (eth_mask->h_proto)
                        num_tuples++;
                break;
        case TCP_V4_FLOW:
        case UDP_V4_FLOW:
                if (fs->m_u.tcp_ip4_spec.tos)
                        return -EINVAL;
                l4_mask = &fs->m_u.tcp_ip4_spec;
                if (l4_mask->ip4src) {
                        if (!all_ones(l4_mask->ip4src))
                                return -EINVAL;
                        num_tuples++;
                }
                if (l4_mask->ip4dst) {
                        if (!all_ones(l4_mask->ip4dst))
                                return -EINVAL;
                        num_tuples++;
                }
                if (l4_mask->psrc) {
                        if (!all_ones(l4_mask->psrc))
                                return -EINVAL;
                        num_tuples++;
                }
                if (l4_mask->pdst) {
                        if (!all_ones(l4_mask->pdst))
                                return -EINVAL;
                        num_tuples++;
                }
                /* Flow is TCP/UDP */
                num_tuples++;
                break;
        case IP_USER_FLOW:
                l3_mask = &fs->m_u.usr_ip4_spec;
                if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
                    fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
                        return -EINVAL;
                if (l3_mask->ip4src) {
                        if (!all_ones(l3_mask->ip4src))
                                return -EINVAL;
                        num_tuples++;
                }
                if (l3_mask->ip4dst) {
                        if (!all_ones(l3_mask->ip4dst))
                                return -EINVAL;
                        num_tuples++;
                }
                /* Flow is IPv4 */
                num_tuples++;
                break;
        default:
                return -EINVAL;
        }
        if ((fs->flow_type & FLOW_EXT)) {
                if (fs->m_ext.vlan_etype ||
                    (fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK)))
                        return -EINVAL;

                if (fs->m_ext.vlan_tci) {
                        if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
                                return -EINVAL;
                }
                num_tuples++;
        }

        if (fs->flow_type & FLOW_MAC_EXT &&
            !is_zero_ether_addr(fs->m_ext.h_dest))
                num_tuples++;

        return num_tuples;
}

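/* Install the rule described by @fs, replacing any rule already at fs->location. */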
int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
                               struct ethtool_rx_flow_spec *fs)
{
        struct mlx5e_ethtool_table *eth_ft;
        struct mlx5e_ethtool_rule *eth_rule;
        struct mlx5_flow_handle *rule;
        int num_tuples;
        int err;

        num_tuples = validate_flow(priv, fs);
        if (num_tuples <= 0) {
                netdev_warn(priv->netdev, "%s: flow is not valid\n", __func__);
                return -EINVAL;
        }

        eth_ft = get_flow_table(priv, fs, num_tuples);
        if (IS_ERR(eth_ft))
                return PTR_ERR(eth_ft);

        eth_rule = get_ethtool_rule(priv, fs->location);
        if (IS_ERR(eth_rule)) {
                put_flow_table(eth_ft);
                return PTR_ERR(eth_rule);
        }

        eth_rule->flow_spec = *fs;
        eth_rule->eth_ft = eth_ft;
        if (!eth_ft->ft) {
                err = -EINVAL;
                goto del_ethtool_rule;
        }
        rule = add_ethtool_flow_rule(priv, eth_ft->ft, fs);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                goto del_ethtool_rule;
        }

        eth_rule->rule = rule;

        return 0;

del_ethtool_rule:
        del_ethtool_rule(priv, eth_rule);

        return err;
}

int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv,
                              int location)
{
        struct mlx5e_ethtool_rule *eth_rule;
        int err = 0;

        if (location >= MAX_NUM_OF_ETHTOOL_RULES)
                return -ENOSPC;

        eth_rule = find_ethtool_rule(priv, location);
        if (!eth_rule) {
                err = -ENOENT;
                goto out;
        }

        del_ethtool_rule(priv, eth_rule);
out:
        return err;
}

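/* Copy the flow spec stored at @location into @info->fs, if such a rule exists. */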
int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
                           int location)
{
        struct mlx5e_ethtool_rule *eth_rule;

        if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES)
                return -EINVAL;

        list_for_each_entry(eth_rule, &priv->fs.ethtool.rules, list) {
                if (eth_rule->flow_spec.location == location) {
                        info->fs = eth_rule->flow_spec;
                        return 0;
                }
        }

        return -ENOENT;
}

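/* Report the locations of all installed ethtool steering rules in @rule_locs. */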
int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
                                u32 *rule_locs)
{
        int location = 0;
        int idx = 0;
        int err = 0;

        info->data = MAX_NUM_OF_ETHTOOL_RULES;
        while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
                err = mlx5e_ethtool_get_flow(priv, info, location);
                if (!err)
                        rule_locs[idx++] = location;
                location++;
        }
        return err;
}

void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv)
{
        struct mlx5e_ethtool_rule *iter;
        struct mlx5e_ethtool_rule *temp;

        list_for_each_entry_safe(iter, temp, &priv->fs.ethtool.rules, list)
                del_ethtool_rule(priv, iter);
}

void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
{
        INIT_LIST_HEAD(&priv->fs.ethtool.rules);
}