linux/drivers/net/ethernet/qlogic/qede/qede_filter.c
/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/udp_tunnel.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>

#include <linux/qed/qed_if.h>
#include "qede.h"

#define QEDE_FILTER_PRINT_MAX_LEN       (64)
struct qede_arfs_tuple {
        union {
                __be32 src_ipv4;
                struct in6_addr src_ipv6;
        };
        union {
                __be32 dst_ipv4;
                struct in6_addr dst_ipv6;
        };
        __be16  src_port;
        __be16  dst_port;
        __be16  eth_proto;
        u8      ip_proto;

        /* Describes the filtering mode needed for this kind of filter */
        enum qed_filter_config_mode mode;

        /* Used to compare new/old filters. Returns true if the IPs match */
        bool (*ip_comp)(struct qede_arfs_tuple *a, struct qede_arfs_tuple *b);

        /* Given a pointer to an ethhdr, build a packet header from tuple info */
        void (*build_hdr)(struct qede_arfs_tuple *t, void *header);

        /* Render the tuple as a string into the provided buffer */
        void (*stringify)(struct qede_arfs_tuple *t, void *buffer);
};

struct qede_arfs_fltr_node {
#define QEDE_FLTR_VALID  0
        unsigned long state;

        /* pointer to aRFS packet buffer */
        void *data;

        /* dma map address of aRFS packet buffer */
        dma_addr_t mapping;

        /* length of aRFS packet buffer */
        int buf_len;

        /* tuple extracted from the aRFS packet buffer */
        struct qede_arfs_tuple tuple;

        u32 flow_id;
        u64 sw_id;
        u16 rxq_id;
        u16 next_rxq_id;
        u8 vfid;
        bool filter_op;
        bool used;
        u8 fw_rc;
        bool b_is_drop;
        struct hlist_node node;
};

struct qede_arfs {
#define QEDE_ARFS_BUCKET_HEAD(edev, idx) (&(edev)->arfs->arfs_hl_head[idx])
#define QEDE_ARFS_POLL_COUNT    100
#define QEDE_RFS_FLW_BITSHIFT   (4)
#define QEDE_RFS_FLW_MASK       ((1 << QEDE_RFS_FLW_BITSHIFT) - 1)
        struct hlist_head       arfs_hl_head[1 << QEDE_RFS_FLW_BITSHIFT];

        /* lock for filter list access */
        spinlock_t              arfs_list_lock;
        unsigned long           *arfs_fltr_bmap;
        int                     filter_count;

        /* Currently configured filtering mode */
        enum qed_filter_config_mode mode;
};

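/* Ask qed to add or delete the n-tuple filter described by @n on queue
 * @rxq_id. A node with a request already in flight (n->used) is skipped;
 * otherwise it is marked busy and the requested operation is recorded so
 * that qede_arfs_filter_op() can complete it. Callers hold arfs_list_lock.
 */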
static void qede_configure_arfs_fltr(struct qede_dev *edev,
                                     struct qede_arfs_fltr_node *n,
                                     u16 rxq_id, bool add_fltr)
{
        const struct qed_eth_ops *op = edev->ops;
        struct qed_ntuple_filter_params params;

        if (n->used)
                return;

        memset(&params, 0, sizeof(params));

        params.addr = n->mapping;
        params.length = n->buf_len;
        params.qid = rxq_id;
        params.b_is_add = add_fltr;
        params.b_is_drop = n->b_is_drop;

        if (n->vfid) {
                params.b_is_vf = true;
                params.vf_id = n->vfid - 1;
        }

        if (n->tuple.stringify) {
                char tuple_buffer[QEDE_FILTER_PRINT_MAX_LEN];

                n->tuple.stringify(&n->tuple, tuple_buffer);
                DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
                           "%s sw_id[0x%llx]: %s [vf %u queue %d]\n",
                           add_fltr ? "Adding" : "Deleting",
                           n->sw_id, tuple_buffer, n->vfid, rxq_id);
        }

        n->used = true;
        n->filter_op = add_fltr;
        op->ntuple_filter_config(edev->cdev, n, &params);
}

static void
qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr)
{
        kfree(fltr->data);

        if (fltr->sw_id < QEDE_RFS_MAX_FLTR)
                clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);

        kfree(fltr);
}

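/* DMA-map the filter's packet buffer, link the node into the hash bucket
 * and, when this is the first filter, enable the RFS searcher in the mode
 * the filter requires. Frees @fltr and returns -ENOMEM if mapping fails.
 */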
static int
qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev,
                                      struct qede_arfs_fltr_node *fltr,
                                      u16 bucket_idx)
{
        fltr->mapping = dma_map_single(&edev->pdev->dev, fltr->data,
                                       fltr->buf_len, DMA_TO_DEVICE);
        if (dma_mapping_error(&edev->pdev->dev, fltr->mapping)) {
                DP_NOTICE(edev, "Failed to map DMA memory for rule\n");
                qede_free_arfs_filter(edev, fltr);
                return -ENOMEM;
        }

        INIT_HLIST_NODE(&fltr->node);
        hlist_add_head(&fltr->node,
                       QEDE_ARFS_BUCKET_HEAD(edev, bucket_idx));

        edev->arfs->filter_count++;
        if (edev->arfs->filter_count == 1 &&
            edev->arfs->mode == QED_FILTER_CONFIG_MODE_DISABLE) {
                edev->ops->configure_arfs_searcher(edev->cdev,
                                                   fltr->tuple.mode);
                edev->arfs->mode = fltr->tuple.mode;
        }

        return 0;
}

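/* Reverse of qede_enqueue_fltr_and_config_searcher(): unlink the node,
 * unmap and free its buffer, and disable the RFS searcher once the last
 * filter is gone.
 */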
static void
qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev,
                                      struct qede_arfs_fltr_node *fltr)
{
        hlist_del(&fltr->node);
        dma_unmap_single(&edev->pdev->dev, fltr->mapping,
                         fltr->buf_len, DMA_TO_DEVICE);

        qede_free_arfs_filter(edev, fltr);

        edev->arfs->filter_count--;
        if (!edev->arfs->filter_count &&
            edev->arfs->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
                enum qed_filter_config_mode mode;

                mode = QED_FILTER_CONFIG_MODE_DISABLE;
                edev->ops->configure_arfs_searcher(edev->cdev, mode);
                edev->arfs->mode = QED_FILTER_CONFIG_MODE_DISABLE;
        }
}

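/* Completion callback invoked by qed once a filter configuration request
 * finishes; @fw_rc carries the firmware status. On failure the node is
 * invalidated. On success the VALID bit tracks whether the filter is now
 * programmed, and a pending queue move (rxq_id != next_rxq_id) triggers
 * the follow-up delete or re-add on the new queue.
 */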
void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc)
{
        struct qede_arfs_fltr_node *fltr = filter;
        struct qede_dev *edev = dev;

        fltr->fw_rc = fw_rc;

        if (fw_rc) {
                DP_NOTICE(edev,
                          "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=0x%llx, src_port=%d, dst_port=%d, rxq=%d\n",
                          fw_rc, fltr->flow_id, fltr->sw_id,
                          ntohs(fltr->tuple.src_port),
                          ntohs(fltr->tuple.dst_port), fltr->rxq_id);

                spin_lock_bh(&edev->arfs->arfs_list_lock);

                fltr->used = false;
                clear_bit(QEDE_FLTR_VALID, &fltr->state);

                spin_unlock_bh(&edev->arfs->arfs_list_lock);
                return;
        }

        spin_lock_bh(&edev->arfs->arfs_list_lock);

        fltr->used = false;

        if (fltr->filter_op) {
                set_bit(QEDE_FLTR_VALID, &fltr->state);
                if (fltr->rxq_id != fltr->next_rxq_id)
                        qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id,
                                                 false);
        } else {
                clear_bit(QEDE_FLTR_VALID, &fltr->state);
                if (fltr->rxq_id != fltr->next_rxq_id) {
                        fltr->rxq_id = fltr->next_rxq_id;
                        qede_configure_arfs_fltr(edev, fltr,
                                                 fltr->rxq_id, true);
                }
        }

        spin_unlock_bh(&edev->arfs->arfs_list_lock);
}

/* Should be called while qede_lock is held */
void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
{
        int i;

        for (i = 0; i <= QEDE_RFS_FLW_MASK; i++) {
                struct hlist_node *temp;
                struct hlist_head *head;
                struct qede_arfs_fltr_node *fltr;

                head = &edev->arfs->arfs_hl_head[i];

                hlist_for_each_entry_safe(fltr, temp, head, node) {
                        bool del = false;

                        if (edev->state != QEDE_STATE_OPEN)
                                del = true;

                        spin_lock_bh(&edev->arfs->arfs_list_lock);

                        if ((!test_bit(QEDE_FLTR_VALID, &fltr->state) &&
                             !fltr->used) || free_fltr) {
                                qede_dequeue_fltr_and_config_searcher(edev,
                                                                      fltr);
                        } else {
                                bool flow_exp = false;
#ifdef CONFIG_RFS_ACCEL
                                flow_exp = rps_may_expire_flow(edev->ndev,
                                                               fltr->rxq_id,
                                                               fltr->flow_id,
                                                               fltr->sw_id);
#endif
                                if ((flow_exp || del) && !free_fltr)
                                        qede_configure_arfs_fltr(edev, fltr,
                                                                 fltr->rxq_id,
                                                                 false);
                        }

                        spin_unlock_bh(&edev->arfs->arfs_list_lock);
                }
        }

#ifdef CONFIG_RFS_ACCEL
        spin_lock_bh(&edev->arfs->arfs_list_lock);

        if (edev->arfs->filter_count) {
                set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
                schedule_delayed_work(&edev->sp_task,
                                      QEDE_SP_TASK_POLL_DELAY);
        }

        spin_unlock_bh(&edev->arfs->arfs_list_lock);
#endif
}

/* This function waits until all aRFS filters get deleted and freed.
 * On timeout it frees all filters forcefully.
 */
void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev)
{
        int count = QEDE_ARFS_POLL_COUNT;

        while (count) {
                qede_process_arfs_filters(edev, false);

                if (!edev->arfs->filter_count)
                        break;

                msleep(100);
                count--;
        }

        if (!count) {
                DP_NOTICE(edev, "Timeout in polling for arfs filter free\n");

                /* Something is terribly wrong, free forcefully */
                qede_process_arfs_filters(edev, true);
        }
}

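/* Allocate the per-device aRFS context: hash bucket heads, the sw_id
 * bitmap and, under CONFIG_RFS_ACCEL, the IRQ CPU affinity reverse-map
 * used by the RFS core.
 */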
int qede_alloc_arfs(struct qede_dev *edev)
{
        int i;

        edev->arfs = vzalloc(sizeof(*edev->arfs));
        if (!edev->arfs)
                return -ENOMEM;

        spin_lock_init(&edev->arfs->arfs_list_lock);

        for (i = 0; i <= QEDE_RFS_FLW_MASK; i++)
                INIT_HLIST_HEAD(QEDE_ARFS_BUCKET_HEAD(edev, i));

        edev->arfs->arfs_fltr_bmap =
                vzalloc(array_size(sizeof(long),
                                   BITS_TO_LONGS(QEDE_RFS_MAX_FLTR)));
        if (!edev->arfs->arfs_fltr_bmap) {
                vfree(edev->arfs);
                edev->arfs = NULL;
                return -ENOMEM;
        }

#ifdef CONFIG_RFS_ACCEL
        edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev));
        if (!edev->ndev->rx_cpu_rmap) {
                vfree(edev->arfs->arfs_fltr_bmap);
                edev->arfs->arfs_fltr_bmap = NULL;
                vfree(edev->arfs);
                edev->arfs = NULL;
                return -ENOMEM;
        }
#endif
        return 0;
}

void qede_free_arfs(struct qede_dev *edev)
{
        if (!edev->arfs)
                return;

#ifdef CONFIG_RFS_ACCEL
        if (edev->ndev->rx_cpu_rmap)
                free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);

        edev->ndev->rx_cpu_rmap = NULL;
#endif
        vfree(edev->arfs->arfs_fltr_bmap);
        edev->arfs->arfs_fltr_bmap = NULL;
        vfree(edev->arfs);
        edev->arfs = NULL;
}

#ifdef CONFIG_RFS_ACCEL
static bool qede_compare_ip_addr(struct qede_arfs_fltr_node *tpos,
                                 const struct sk_buff *skb)
{
        if (skb->protocol == htons(ETH_P_IP)) {
                if (tpos->tuple.src_ipv4 == ip_hdr(skb)->saddr &&
                    tpos->tuple.dst_ipv4 == ip_hdr(skb)->daddr)
                        return true;
                else
                        return false;
        } else {
                struct in6_addr *src = &tpos->tuple.src_ipv6;
                u8 size = sizeof(struct in6_addr);

                if (!memcmp(src, &ipv6_hdr(skb)->saddr, size) &&
                    !memcmp(&tpos->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr, size))
                        return true;
                else
                        return false;
        }
}

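/* Walk one hash bucket looking for a node whose tuple matches the skb's
 * L3/L4 protocols, IP addresses and ports.
 */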
static struct qede_arfs_fltr_node *
qede_arfs_htbl_key_search(struct hlist_head *h, const struct sk_buff *skb,
                          __be16 src_port, __be16 dst_port, u8 ip_proto)
{
        struct qede_arfs_fltr_node *tpos;

        hlist_for_each_entry(tpos, h, node)
                if (tpos->tuple.ip_proto == ip_proto &&
                    tpos->tuple.eth_proto == skb->protocol &&
                    qede_compare_ip_addr(tpos, skb) &&
                    tpos->tuple.src_port == src_port &&
                    tpos->tuple.dst_port == dst_port)
                        return tpos;

        return NULL;
}

static struct qede_arfs_fltr_node *
qede_alloc_filter(struct qede_dev *edev, int min_hlen)
{
        struct qede_arfs_fltr_node *n;
        int bit_id;

        bit_id = find_first_zero_bit(edev->arfs->arfs_fltr_bmap,
                                     QEDE_RFS_MAX_FLTR);

        if (bit_id >= QEDE_RFS_MAX_FLTR)
                return NULL;

        n = kzalloc(sizeof(*n), GFP_ATOMIC);
        if (!n)
                return NULL;

        n->data = kzalloc(min_hlen, GFP_ATOMIC);
        if (!n->data) {
                kfree(n);
                return NULL;
        }

        n->sw_id = (u16)bit_id;
        set_bit(bit_id, edev->arfs->arfs_fltr_bmap);
        return n;
}

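/* .ndo_rx_flow_steer callback; called by the RFS core to steer the flow
 * of @skb to @rxq_index. Only non-tunneled TCP/UDP over IPv4/IPv6 is
 * supported. An existing flow is rescheduled to the new queue; otherwise
 * a node is allocated, the packet headers are copied into its buffer and
 * a 5-tuple filter is programmed. Returns the filter's sw_id (used by the
 * core as the flow identifier) or a negative errno.
 */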
int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
                       u16 rxq_index, u32 flow_id)
{
        struct qede_dev *edev = netdev_priv(dev);
        struct qede_arfs_fltr_node *n;
        int min_hlen, rc, tp_offset;
        struct ethhdr *eth;
        __be16 *ports;
        u16 tbl_idx;
        u8 ip_proto;

        if (skb->encapsulation)
                return -EPROTONOSUPPORT;

        if (skb->protocol != htons(ETH_P_IP) &&
            skb->protocol != htons(ETH_P_IPV6))
                return -EPROTONOSUPPORT;

        if (skb->protocol == htons(ETH_P_IP)) {
                ip_proto = ip_hdr(skb)->protocol;
                tp_offset = sizeof(struct iphdr);
        } else {
                ip_proto = ipv6_hdr(skb)->nexthdr;
                tp_offset = sizeof(struct ipv6hdr);
        }

        if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
                return -EPROTONOSUPPORT;

        ports = (__be16 *)(skb->data + tp_offset);
        tbl_idx = skb_get_hash_raw(skb) & QEDE_RFS_FLW_MASK;

        spin_lock_bh(&edev->arfs->arfs_list_lock);

        n = qede_arfs_htbl_key_search(QEDE_ARFS_BUCKET_HEAD(edev, tbl_idx),
                                      skb, ports[0], ports[1], ip_proto);
        if (n) {
                /* Filter match */
                n->next_rxq_id = rxq_index;

                if (test_bit(QEDE_FLTR_VALID, &n->state)) {
                        if (n->rxq_id != rxq_index)
                                qede_configure_arfs_fltr(edev, n, n->rxq_id,
                                                         false);
                } else {
                        if (!n->used) {
                                n->rxq_id = rxq_index;
                                qede_configure_arfs_fltr(edev, n, n->rxq_id,
                                                         true);
                        }
                }

                rc = n->sw_id;
                goto ret_unlock;
        }

        min_hlen = ETH_HLEN + skb_headlen(skb);

        n = qede_alloc_filter(edev, min_hlen);
        if (!n) {
                rc = -ENOMEM;
                goto ret_unlock;
        }

        n->buf_len = min_hlen;
        n->rxq_id = rxq_index;
        n->next_rxq_id = rxq_index;
        n->tuple.src_port = ports[0];
        n->tuple.dst_port = ports[1];
        n->flow_id = flow_id;

        if (skb->protocol == htons(ETH_P_IP)) {
                n->tuple.src_ipv4 = ip_hdr(skb)->saddr;
                n->tuple.dst_ipv4 = ip_hdr(skb)->daddr;
        } else {
                memcpy(&n->tuple.src_ipv6, &ipv6_hdr(skb)->saddr,
                       sizeof(struct in6_addr));
                memcpy(&n->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr,
                       sizeof(struct in6_addr));
        }

        eth = (struct ethhdr *)n->data;
        eth->h_proto = skb->protocol;
        n->tuple.eth_proto = skb->protocol;
        n->tuple.ip_proto = ip_proto;
        n->tuple.mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
        memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb));

        rc = qede_enqueue_fltr_and_config_searcher(edev, n, tbl_idx);
        if (rc)
                goto ret_unlock;

        qede_configure_arfs_fltr(edev, n, n->rxq_id, true);

        spin_unlock_bh(&edev->arfs->arfs_list_lock);

        set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
        schedule_delayed_work(&edev->sp_task, 0);

        return n->sw_id;

ret_unlock:
        spin_unlock_bh(&edev->arfs->arfs_list_lock);
        return rc;
}
#endif

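/* qed callback notifying the driver which UDP tunnel ports are currently
 * configured in the device; forget any cached port that no longer matches.
 */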
void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port)
{
        struct qede_dev *edev = dev;

        if (edev->vxlan_dst_port != vxlan_port)
                edev->vxlan_dst_port = 0;

        if (edev->geneve_dst_port != geneve_port)
                edev->geneve_dst_port = 0;
}

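/* qed callback requesting that the netdev adopt @mac as its address;
 * invalid addresses are silently ignored.
 */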
void qede_force_mac(void *dev, u8 *mac, bool forced)
{
        struct qede_dev *edev = dev;

        __qede_lock(edev);

        if (!is_valid_ether_addr(mac)) {
                __qede_unlock(edev);
                return;
        }

        ether_addr_copy(edev->ndev->dev_addr, mac);
        __qede_unlock(edev);
}

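/* Prepare the qed RSS vport parameters from the device configuration.
 * Seeds the indirection table, hash key and capability flags with defaults
 * on first use (or when the table references a nonexistent queue) and sets
 * *update to 0 when only a single RX queue exists.
 */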
void qede_fill_rss_params(struct qede_dev *edev,
                          struct qed_update_vport_rss_params *rss, u8 *update)
{
        bool need_reset = false;
        int i;

        if (QEDE_RSS_COUNT(edev) <= 1) {
                memset(rss, 0, sizeof(*rss));
                *update = 0;
                return;
        }

        /* Need to validate current RSS config uses valid entries */
        for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
                if (edev->rss_ind_table[i] >= QEDE_RSS_COUNT(edev)) {
                        need_reset = true;
                        break;
                }
        }

        if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) || need_reset) {
                for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
                        u16 indir_val, val;

                        val = QEDE_RSS_COUNT(edev);
                        indir_val = ethtool_rxfh_indir_default(i, val);
                        edev->rss_ind_table[i] = indir_val;
                }
                edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
        }

        /* Now that we have the queue-indirection, prepare the handles */
        for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
                u16 idx = QEDE_RX_QUEUE_IDX(edev, edev->rss_ind_table[i]);

                rss->rss_ind_table[i] = edev->fp_array[idx].rxq->handle;
        }

        if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
                netdev_rss_key_fill(edev->rss_key, sizeof(edev->rss_key));
                edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
        }
        memcpy(rss->rss_key, edev->rss_key, sizeof(rss->rss_key));

        if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
                edev->rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 |
                    QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
                edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
        }
        rss->rss_caps = edev->rss_caps;

        *update = 1;
}

static int qede_set_ucast_rx_mac(struct qede_dev *edev,
                                 enum qed_filter_xcast_params_type opcode,
                                 unsigned char mac[ETH_ALEN])
{
        struct qed_filter_params filter_cmd;

        memset(&filter_cmd, 0, sizeof(filter_cmd));
        filter_cmd.type = QED_FILTER_TYPE_UCAST;
        filter_cmd.filter.ucast.type = opcode;
        filter_cmd.filter.ucast.mac_valid = 1;
        ether_addr_copy(filter_cmd.filter.ucast.mac, mac);

        return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
                                  enum qed_filter_xcast_params_type opcode,
                                  u16 vid)
{
        struct qed_filter_params filter_cmd;

        memset(&filter_cmd, 0, sizeof(filter_cmd));
        filter_cmd.type = QED_FILTER_TYPE_UCAST;
        filter_cmd.filter.ucast.type = opcode;
        filter_cmd.filter.ucast.vlan_valid = 1;
        filter_cmd.filter.ucast.vlan = vid;

        return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

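/* Toggle the vport's accept-any-VLAN flag if it differs from @action.
 * Note that a failed vport update is only logged; 0 is returned either way.
 */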
static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
{
        struct qed_update_vport_params *params;
        int rc;

        /* Proceed only if action actually needs to be performed */
        if (edev->accept_any_vlan == action)
                return 0;

        params = vzalloc(sizeof(*params));
        if (!params)
                return -ENOMEM;

        params->vport_id = 0;
        params->accept_any_vlan = action;
        params->update_accept_any_vlan_flg = 1;

        rc = edev->ops->vport_update(edev->cdev, params);
        if (rc) {
                DP_ERR(edev, "Failed to %s accept-any-vlan\n",
                       action ? "enable" : "disable");
        } else {
                DP_INFO(edev, "%s accept-any-vlan\n",
                        action ? "enabled" : "disabled");
                edev->accept_any_vlan = action;
        }

        vfree(params);
        return 0;
}

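/* .ndo_vlan_rx_add_vid callback. When the interface is down the VID is
 * only cached on vlan_list; otherwise a VLAN filter is programmed, or
 * accept-any-VLAN mode is enabled once the hardware filter quota is spent.
 */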
int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
        struct qede_dev *edev = netdev_priv(dev);
        struct qede_vlan *vlan, *tmp;
        int rc = 0;

        DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);

        vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
        if (!vlan) {
                DP_INFO(edev, "Failed to allocate struct for vlan\n");
                return -ENOMEM;
        }
        INIT_LIST_HEAD(&vlan->list);
        vlan->vid = vid;
        vlan->configured = false;

        /* Verify vlan isn't already configured */
        list_for_each_entry(tmp, &edev->vlan_list, list) {
                if (tmp->vid == vlan->vid) {
                        DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
                                   "vlan already configured\n");
                        kfree(vlan);
                        return -EEXIST;
                }
        }

        /* If interface is down, cache this VLAN ID and return */
        __qede_lock(edev);
        if (edev->state != QEDE_STATE_OPEN) {
                DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
                           "Interface is down, VLAN %d will be configured when interface is up\n",
                           vid);
                if (vid != 0)
                        edev->non_configured_vlans++;
                list_add(&vlan->list, &edev->vlan_list);
                goto out;
        }

        /* Check for the filter limit.
         * Note - vlan0 has a reserved filter and can be added without
         * worrying about quota
         */
        if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
            (vlan->vid == 0)) {
                rc = qede_set_ucast_rx_vlan(edev,
                                            QED_FILTER_XCAST_TYPE_ADD,
                                            vlan->vid);
                if (rc) {
                        DP_ERR(edev, "Failed to configure VLAN %d\n",
                               vlan->vid);
                        kfree(vlan);
                        goto out;
                }
                vlan->configured = true;

                /* vlan0 filter isn't consuming out of our quota */
                if (vlan->vid != 0)
                        edev->configured_vlans++;
        } else {
                /* Out of quota; Activate accept-any-VLAN mode */
                if (!edev->non_configured_vlans) {
                        rc = qede_config_accept_any_vlan(edev, true);
                        if (rc) {
                                kfree(vlan);
                                goto out;
                        }
                }

                edev->non_configured_vlans++;
        }

        list_add(&vlan->list, &edev->vlan_list);

out:
        __qede_unlock(edev);
        return rc;
}

static void qede_del_vlan_from_list(struct qede_dev *edev,
                                    struct qede_vlan *vlan)
{
        /* vlan0 filter isn't consuming out of our quota */
        if (vlan->vid != 0) {
                if (vlan->configured)
                        edev->configured_vlans--;
                else
                        edev->non_configured_vlans--;
        }

        list_del(&vlan->list);
        kfree(vlan);
}

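/* Try to program every cached, not-yet-configured VLAN from vlan_list,
 * then enable accept-any-VLAN if some VLANs still exceed the filter quota
 * or disable it once all VLANs are truly configured.
 */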
int qede_configure_vlan_filters(struct qede_dev *edev)
{
        int rc = 0, real_rc = 0, accept_any_vlan = 0;
        struct qed_dev_eth_info *dev_info;
        struct qede_vlan *vlan = NULL;

        if (list_empty(&edev->vlan_list))
                return 0;

        dev_info = &edev->dev_info;

        /* Configure non-configured vlans */
        list_for_each_entry(vlan, &edev->vlan_list, list) {
                if (vlan->configured)
                        continue;

                /* We have used all our credits, now enable accept_any_vlan */
                if ((vlan->vid != 0) &&
                    (edev->configured_vlans == dev_info->num_vlan_filters)) {
                        accept_any_vlan = 1;
                        continue;
                }

                DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);

                rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
                                            vlan->vid);
                if (rc) {
                        DP_ERR(edev, "Failed to configure VLAN %u\n",
                               vlan->vid);
                        real_rc = rc;
                        continue;
                }

                vlan->configured = true;
                /* vlan0 filter doesn't consume our VLAN filter's quota */
                if (vlan->vid != 0) {
                        edev->non_configured_vlans--;
                        edev->configured_vlans++;
                }
        }

        /* enable accept_any_vlan mode if we have more VLANs than credits,
         * or remove accept_any_vlan mode if we've actually removed
         * a non-configured vlan, and all remaining vlans are truly configured.
         */

        if (accept_any_vlan)
                rc = qede_config_accept_any_vlan(edev, true);
        else if (!edev->non_configured_vlans)
                rc = qede_config_accept_any_vlan(edev, false);

        if (rc && !real_rc)
                real_rc = rc;

        return real_rc;
}

int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
        struct qede_dev *edev = netdev_priv(dev);
        struct qede_vlan *vlan = NULL;
        int rc = 0;

        DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);

        /* Find whether entry exists */
        __qede_lock(edev);
        list_for_each_entry(vlan, &edev->vlan_list, list)
                if (vlan->vid == vid)
                        break;

        if (!vlan || (vlan->vid != vid)) {
                DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
                           "Vlan isn't configured\n");
                goto out;
        }

        if (edev->state != QEDE_STATE_OPEN) {
                /* As interface is already down, we don't have a VPORT
                 * instance to remove vlan filter. So just update vlan list
                 */
                DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
                           "Interface is down, removing VLAN from list only\n");
                qede_del_vlan_from_list(edev, vlan);
                goto out;
        }

        /* Remove vlan */
        if (vlan->configured) {
                rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
                                            vid);
                if (rc) {
                        DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
                        goto out;
                }
        }

        qede_del_vlan_from_list(edev, vlan);

        /* We have removed a VLAN - try to see if we can
         * configure non-configured VLAN from the list.
         */
        rc = qede_configure_vlan_filters(edev);

out:
        __qede_unlock(edev);
        return rc;
}

void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
{
        struct qede_vlan *vlan = NULL;

        if (list_empty(&edev->vlan_list))
                return;

        list_for_each_entry(vlan, &edev->vlan_list, list) {
                if (!vlan->configured)
                        continue;

                vlan->configured = false;

                /* vlan0 filter isn't consuming out of our quota */
                if (vlan->vid != 0) {
                        edev->non_configured_vlans++;
                        edev->configured_vlans--;
                }

                DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
                           "marked vlan %d as non-configured\n", vlan->vid);
        }

        edev->accept_any_vlan = false;
}

static void qede_set_features_reload(struct qede_dev *edev,
                                     struct qede_reload_args *args)
{
        edev->ndev->features = args->u.features;
}

netdev_features_t qede_fix_features(struct net_device *dev,
                                    netdev_features_t features)
{
        struct qede_dev *edev = netdev_priv(dev);

        if (edev->xdp_prog || edev->ndev->mtu > PAGE_SIZE ||
            !(features & NETIF_F_GRO))
                features &= ~NETIF_F_GRO_HW;

        return features;
}

int qede_set_features(struct net_device *dev, netdev_features_t features)
{
        struct qede_dev *edev = netdev_priv(dev);
        netdev_features_t changes = features ^ dev->features;
        bool need_reload = false;

        if (changes & NETIF_F_GRO_HW)
                need_reload = true;

        if (need_reload) {
                struct qede_reload_args args;

                args.u.features = features;
                args.func = &qede_set_features_reload;

                /* Make sure that we definitely need to reload.
                 * In case of an eBPF attached program, there will be no FW
                 * aggregations, so no need to actually reload.
                 */
                __qede_lock(edev);
                if (edev->xdp_prog)
                        args.func(edev, &args);
                else
                        qede_reload(edev, &args, true);
                __qede_unlock(edev);

                return 1;
        }

        return 0;
}

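/* udp_tunnel add callback: program the VXLAN/GENEVE UDP destination port
 * in the device, provided the tunnel type is supported and no port of
 * that type is configured yet.
 */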
void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
{
        struct qede_dev *edev = netdev_priv(dev);
        struct qed_tunn_params tunn_params;
        u16 t_port = ntohs(ti->port);
        int rc;

        memset(&tunn_params, 0, sizeof(tunn_params));

        switch (ti->type) {
        case UDP_TUNNEL_TYPE_VXLAN:
                if (!edev->dev_info.common.vxlan_enable)
                        return;

                if (edev->vxlan_dst_port)
                        return;

                tunn_params.update_vxlan_port = 1;
                tunn_params.vxlan_port = t_port;

                __qede_lock(edev);
                rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
                __qede_unlock(edev);

                if (!rc) {
                        edev->vxlan_dst_port = t_port;
                        DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
                                   t_port);
                } else {
                        DP_NOTICE(edev, "Failed to add vxlan UDP port=%d\n",
                                  t_port);
                }

                break;
        case UDP_TUNNEL_TYPE_GENEVE:
                if (!edev->dev_info.common.geneve_enable)
                        return;

                if (edev->geneve_dst_port)
                        return;

                tunn_params.update_geneve_port = 1;
                tunn_params.geneve_port = t_port;

                __qede_lock(edev);
                rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
                __qede_unlock(edev);

                if (!rc) {
                        edev->geneve_dst_port = t_port;
                        DP_VERBOSE(edev, QED_MSG_DEBUG,
                                   "Added geneve port=%d\n", t_port);
                } else {
                        DP_NOTICE(edev, "Failed to add geneve UDP port=%d\n",
                                  t_port);
                }

                break;
        default:
                return;
        }
}

void qede_udp_tunnel_del(struct net_device *dev,
                         struct udp_tunnel_info *ti)
{
        struct qede_dev *edev = netdev_priv(dev);
        struct qed_tunn_params tunn_params;
        u16 t_port = ntohs(ti->port);

        memset(&tunn_params, 0, sizeof(tunn_params));

        switch (ti->type) {
        case UDP_TUNNEL_TYPE_VXLAN:
                if (t_port != edev->vxlan_dst_port)
                        return;

                tunn_params.update_vxlan_port = 1;
                tunn_params.vxlan_port = 0;

                __qede_lock(edev);
                edev->ops->tunn_config(edev->cdev, &tunn_params);
                __qede_unlock(edev);

                edev->vxlan_dst_port = 0;

                DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
                           t_port);

                break;
        case UDP_TUNNEL_TYPE_GENEVE:
                if (t_port != edev->geneve_dst_port)
                        return;

                tunn_params.update_geneve_port = 1;
                tunn_params.geneve_port = 0;

                __qede_lock(edev);
                edev->ops->tunn_config(edev->cdev, &tunn_params);
                __qede_unlock(edev);

                edev->geneve_dst_port = 0;

                DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
                           t_port);
                break;
        default:
                return;
        }
}

static void qede_xdp_reload_func(struct qede_dev *edev,
                                 struct qede_reload_args *args)
{
        struct bpf_prog *old;

        old = xchg(&edev->xdp_prog, args->u.new_prog);
        if (old)
                bpf_prog_put(old);
}

static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
{
        struct qede_reload_args args;

        /* If we're called, there was already a bpf reference increment */
        args.func = &qede_xdp_reload_func;
        args.u.new_prog = prog;
        qede_reload(edev, &args, false);

        return 0;
}

int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
        struct qede_dev *edev = netdev_priv(dev);

        switch (xdp->command) {
        case XDP_SETUP_PROG:
                return qede_xdp_set(edev, xdp->prog);
        case XDP_QUERY_PROG:
                xdp->prog_id = edev->xdp_prog ? edev->xdp_prog->aux->id : 0;
                return 0;
        default:
                return -EINVAL;
        }
}

static int qede_set_mcast_rx_mac(struct qede_dev *edev,
                                 enum qed_filter_xcast_params_type opcode,
                                 unsigned char *mac, int num_macs)
{
        struct qed_filter_params filter_cmd;
        int i;

        memset(&filter_cmd, 0, sizeof(filter_cmd));
        filter_cmd.type = QED_FILTER_TYPE_MCAST;
        filter_cmd.filter.mcast.type = opcode;
        filter_cmd.filter.mcast.num = num_macs;

        for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
                ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);

        return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

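/* .ndo_set_mac_address callback. Validates the new address with qed,
 * removes the unicast filter for the old MAC and programs one for the
 * new MAC; when the device is down, a VF only asks the PF to refresh the
 * bulletin-board copy.
 */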
int qede_set_mac_addr(struct net_device *ndev, void *p)
{
        struct qede_dev *edev = netdev_priv(ndev);
        struct sockaddr *addr = p;
        int rc = 0;

        /* Make sure the state doesn't transition while changing the MAC.
         * Also, all flows accessing the dev_addr field are doing that under
         * this lock.
         */
        __qede_lock(edev);

        if (!is_valid_ether_addr(addr->sa_data)) {
                DP_NOTICE(edev, "The MAC address is not valid\n");
                rc = -EFAULT;
                goto out;
        }

        if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
                DP_NOTICE(edev, "qed prevents setting MAC %pM\n",
                          addr->sa_data);
                rc = -EINVAL;
                goto out;
        }

        if (edev->state == QEDE_STATE_OPEN) {
                /* Remove the previous primary mac */
                rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
                                           ndev->dev_addr);
                if (rc)
                        goto out;
        }

        ether_addr_copy(ndev->dev_addr, addr->sa_data);
        DP_INFO(edev, "Setting device MAC to %pM\n", addr->sa_data);

        if (edev->state != QEDE_STATE_OPEN) {
                DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
                           "The device is currently down\n");
                /* Ask PF to explicitly update a copy in bulletin board */
                if (IS_VF(edev) && edev->ops->req_bulletin_update_mac)
                        edev->ops->req_bulletin_update_mac(edev->cdev,
                                                           ndev->dev_addr);
                goto out;
        }

        edev->ops->common->update_mac(edev->cdev, ndev->dev_addr);

        rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
                                   ndev->dev_addr);
out:
        __qede_unlock(edev);
        return rc;
}

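/* Rebuild the multicast filter configuration: drop the previous filters,
 * then either program the current list (fewer than 64 entries) or fall
 * back to multicast-promiscuous mode via *accept_flags.
 */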
static int
qede_configure_mcast_filtering(struct net_device *ndev,
                               enum qed_filter_rx_mode_type *accept_flags)
{
        struct qede_dev *edev = netdev_priv(ndev);
        unsigned char *mc_macs, *temp;
        struct netdev_hw_addr *ha;
        int rc = 0, mc_count;
        size_t size;

        size = 64 * ETH_ALEN;

        mc_macs = kzalloc(size, GFP_KERNEL);
        if (!mc_macs) {
                DP_NOTICE(edev,
                          "Failed to allocate memory for multicast MACs\n");
                rc = -ENOMEM;
                goto exit;
        }

        temp = mc_macs;

        /* Remove all previously configured MAC filters */
        rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
                                   mc_macs, 1);
        if (rc)
                goto exit;

        netif_addr_lock_bh(ndev);

        mc_count = netdev_mc_count(ndev);
        if (mc_count < 64) {
                netdev_for_each_mc_addr(ha, ndev) {
                        ether_addr_copy(temp, ha->addr);
                        temp += ETH_ALEN;
                }
        }

        netif_addr_unlock_bh(ndev);

        /* Check for all multicast @@@TBD resource allocation */
        if ((ndev->flags & IFF_ALLMULTI) || (mc_count > 64)) {
                if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
                        *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
        } else {
                /* Add all multicast MAC filters */
                rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
                                           mc_macs, mc_count);
        }

exit:
        kfree(mc_macs);
        return rc;
}

void qede_set_rx_mode(struct net_device *ndev)
{
        struct qede_dev *edev = netdev_priv(ndev);

        set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
        schedule_delayed_work(&edev->sp_task, 0);
}

/* Must be called with qede_lock held */
void qede_config_rx_mode(struct net_device *ndev)
{
        enum qed_filter_rx_mode_type accept_flags;
        struct qede_dev *edev = netdev_priv(ndev);
        struct qed_filter_params rx_mode;
        unsigned char *uc_macs, *temp;
        struct netdev_hw_addr *ha;
        int rc, uc_count;
        size_t size;

        netif_addr_lock_bh(ndev);

        uc_count = netdev_uc_count(ndev);
        size = uc_count * ETH_ALEN;

        uc_macs = kzalloc(size, GFP_ATOMIC);
        if (!uc_macs) {
                DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
                netif_addr_unlock_bh(ndev);
                return;
        }

        temp = uc_macs;
        netdev_for_each_uc_addr(ha, ndev) {
                ether_addr_copy(temp, ha->addr);
                temp += ETH_ALEN;
        }

        netif_addr_unlock_bh(ndev);

        /* Configure the struct for the Rx mode */
        memset(&rx_mode, 0, sizeof(struct qed_filter_params));
        rx_mode.type = QED_FILTER_TYPE_RX_MODE;

        /* Remove all previous unicast secondary macs and multicast macs
         * (configure / leave the primary mac)
         */
        rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
                                   edev->ndev->dev_addr);
        if (rc)
                goto out;

        /* Check for promiscuous */
        if (ndev->flags & IFF_PROMISC)
                accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
        else
                accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR;

        /* Configure all filters regardless, in case promisc is rejected */
        if (uc_count < edev->dev_info.num_mac_filters) {
                int i;

                temp = uc_macs;
                for (i = 0; i < uc_count; i++) {
                        rc = qede_set_ucast_rx_mac(edev,
                                                   QED_FILTER_XCAST_TYPE_ADD,
                                                   temp);
                        if (rc)
                                goto out;

                        temp += ETH_ALEN;
                }
        } else {
                accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
        }

        rc = qede_configure_mcast_filtering(ndev, &accept_flags);
        if (rc)
                goto out;

        /* take care of VLAN mode */
        if (ndev->flags & IFF_PROMISC) {
                qede_config_accept_any_vlan(edev, true);
        } else if (!edev->non_configured_vlans) {
                /* It's possible that accept_any_vlan mode is set due to a
                 * previous setting of IFF_PROMISC. If vlan credits are
                 * sufficient, disable accept_any_vlan.
                 */
                qede_config_accept_any_vlan(edev, false);
        }

        rx_mode.filter.accept_flags = accept_flags;
        edev->ops->filter_config(edev->cdev, &rx_mode);
out:
        kfree(uc_macs);
}

static struct qede_arfs_fltr_node *
qede_get_arfs_fltr_by_loc(struct hlist_head *head, u64 location)
{
        struct qede_arfs_fltr_node *fltr;

        hlist_for_each_entry(fltr, head, node)
                if (location == fltr->sw_id)
                        return fltr;

        return NULL;
}

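/* ethtool ETHTOOL_GRXCLSRLALL handler: report the sw_ids of all n-tuple
 * rules (kept in bucket 0 of the aRFS hash table) into @rule_locs.
 */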
int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info,
                          u32 *rule_locs)
{
        struct qede_arfs_fltr_node *fltr;
        struct hlist_head *head;
        int cnt = 0, rc = 0;

        info->data = QEDE_RFS_MAX_FLTR;

        __qede_lock(edev);

        if (!edev->arfs) {
                rc = -EPERM;
                goto unlock;
        }

        head = QEDE_ARFS_BUCKET_HEAD(edev, 0);

        hlist_for_each_entry(fltr, head, node) {
                if (cnt == info->rule_cnt) {
                        rc = -EMSGSIZE;
                        goto unlock;
                }

                rule_locs[cnt] = fltr->sw_id;
                cnt++;
        }

        info->rule_cnt = cnt;

unlock:
        __qede_unlock(edev);
        return rc;
}

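/* ethtool ETHTOOL_GRXCLSRULE handler: translate the rule stored at
 * @fsp->location back into an ethtool_rx_flow_spec, including the VF and
 * drop encodings of the ring cookie.
 */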
int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd)
{
        struct ethtool_rx_flow_spec *fsp = &cmd->fs;
        struct qede_arfs_fltr_node *fltr = NULL;
        int rc = 0;

        cmd->data = QEDE_RFS_MAX_FLTR;

        __qede_lock(edev);

        if (!edev->arfs) {
                rc = -EPERM;
                goto unlock;
        }

        fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
                                         fsp->location);
        if (!fltr) {
                DP_NOTICE(edev, "Rule not found - location=0x%x\n",
                          fsp->location);
                rc = -EINVAL;
                goto unlock;
        }

        if (fltr->tuple.eth_proto == htons(ETH_P_IP)) {
                if (fltr->tuple.ip_proto == IPPROTO_TCP)
                        fsp->flow_type = TCP_V4_FLOW;
                else
                        fsp->flow_type = UDP_V4_FLOW;

                fsp->h_u.tcp_ip4_spec.psrc = fltr->tuple.src_port;
                fsp->h_u.tcp_ip4_spec.pdst = fltr->tuple.dst_port;
                fsp->h_u.tcp_ip4_spec.ip4src = fltr->tuple.src_ipv4;
                fsp->h_u.tcp_ip4_spec.ip4dst = fltr->tuple.dst_ipv4;
        } else {
                if (fltr->tuple.ip_proto == IPPROTO_TCP)
                        fsp->flow_type = TCP_V6_FLOW;
                else
                        fsp->flow_type = UDP_V6_FLOW;
                fsp->h_u.tcp_ip6_spec.psrc = fltr->tuple.src_port;
                fsp->h_u.tcp_ip6_spec.pdst = fltr->tuple.dst_port;
                memcpy(&fsp->h_u.tcp_ip6_spec.ip6src,
                       &fltr->tuple.src_ipv6, sizeof(struct in6_addr));
                memcpy(&fsp->h_u.tcp_ip6_spec.ip6dst,
                       &fltr->tuple.dst_ipv6, sizeof(struct in6_addr));
        }

        fsp->ring_cookie = fltr->rxq_id;

        if (fltr->vfid) {
                fsp->ring_cookie |= ((u64)fltr->vfid) <<
                                        ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
        }

        if (fltr->b_is_drop)
                fsp->ring_cookie = RX_CLS_FLOW_DISC;
unlock:
        __qede_unlock(edev);
        return rc;
}

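/* Busy-wait (in 20 ms steps) for an in-flight request on @fltr to
 * complete. On timeout or a firmware error the filter is removed and
 * -EIO is returned; otherwise the firmware status (0) is propagated.
 */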
1461static int
1462qede_poll_arfs_filter_config(struct qede_dev *edev,
1463                             struct qede_arfs_fltr_node *fltr)
1464{
1465        int count = QEDE_ARFS_POLL_COUNT;
1466
1467        while (fltr->used && count) {
1468                msleep(20);
1469                count--;
1470        }
1471
1472        if (count == 0 || fltr->fw_rc) {
1473                DP_NOTICE(edev, "Timeout in polling filter config\n");
1474                qede_dequeue_fltr_and_config_searcher(edev, fltr);
1475                return -EIO;
1476        }
1477
1478        return fltr->fw_rc;
1479}
1480
1481static int qede_flow_get_min_header_size(struct qede_arfs_tuple *t)
1482{
1483        int size = ETH_HLEN;
1484
1485        if (t->eth_proto == htons(ETH_P_IP))
1486                size += sizeof(struct iphdr);
1487        else
1488                size += sizeof(struct ipv6hdr);
1489
1490        if (t->ip_proto == IPPROTO_TCP)
1491                size += sizeof(struct tcphdr);
1492        else
1493                size += sizeof(struct udphdr);
1494
1495        return size;
1496}
1497
1498static bool qede_flow_spec_ipv4_cmp(struct qede_arfs_tuple *a,
1499                                    struct qede_arfs_tuple *b)
1500{
1501        if (a->eth_proto != htons(ETH_P_IP) ||
1502            b->eth_proto != htons(ETH_P_IP))
1503                return false;
1504
1505        return (a->src_ipv4 == b->src_ipv4) &&
1506               (a->dst_ipv4 == b->dst_ipv4);
1507}
1508
1509static void qede_flow_build_ipv4_hdr(struct qede_arfs_tuple *t,
1510                                     void *header)
1511{
1512        __be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct iphdr));
1513        struct iphdr *ip = (struct iphdr *)(header + ETH_HLEN);
1514        struct ethhdr *eth = (struct ethhdr *)header;
1515
1516        eth->h_proto = t->eth_proto;
1517        ip->saddr = t->src_ipv4;
1518        ip->daddr = t->dst_ipv4;
1519        ip->version = 0x4;
1520        ip->ihl = 0x5;
1521        ip->protocol = t->ip_proto;
1522        ip->tot_len = cpu_to_be16(qede_flow_get_min_header_size(t) - ETH_HLEN);
1523
1524        /* ports is weakly typed to suit both TCP and UDP ports */
1525        ports[0] = t->src_port;
1526        ports[1] = t->dst_port;
1527}
1528
1529static void qede_flow_stringify_ipv4_hdr(struct qede_arfs_tuple *t,
1530                                         void *buffer)
1531{
1532        const char *prefix = t->ip_proto == IPPROTO_TCP ? "TCP" : "UDP";
1533
1534        snprintf(buffer, QEDE_FILTER_PRINT_MAX_LEN,
1535                 "%s %pI4 (%04x) -> %pI4 (%04x)",
1536                 prefix, &t->src_ipv4, t->src_port,
1537                 &t->dst_ipv4, t->dst_port);
1538}
1539
static bool qede_flow_spec_ipv6_cmp(struct qede_arfs_tuple *a,
				    struct qede_arfs_tuple *b)
{
	if (a->eth_proto != htons(ETH_P_IPV6) ||
	    b->eth_proto != htons(ETH_P_IPV6))
		return false;

	if (memcmp(&a->src_ipv6, &b->src_ipv6, sizeof(struct in6_addr)))
		return false;

	if (memcmp(&a->dst_ipv6, &b->dst_ipv6, sizeof(struct in6_addr)))
		return false;

	return true;
}

static void qede_flow_build_ipv6_hdr(struct qede_arfs_tuple *t,
				     void *header)
{
	__be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct ipv6hdr));
	struct ipv6hdr *ip6 = (struct ipv6hdr *)(header + ETH_HLEN);
	struct ethhdr *eth = (struct ethhdr *)header;

	eth->h_proto = t->eth_proto;
	memcpy(&ip6->saddr, &t->src_ipv6, sizeof(struct in6_addr));
	memcpy(&ip6->daddr, &t->dst_ipv6, sizeof(struct in6_addr));
	ip6->version = 0x6;

	if (t->ip_proto == IPPROTO_TCP) {
		ip6->nexthdr = NEXTHDR_TCP;
		ip6->payload_len = cpu_to_be16(sizeof(struct tcphdr));
	} else {
		ip6->nexthdr = NEXTHDR_UDP;
		ip6->payload_len = cpu_to_be16(sizeof(struct udphdr));
	}

	/* ports is weakly typed to suit both TCP and UDP ports */
	ports[0] = t->src_port;
	ports[1] = t->dst_port;
}

/* Validate fields which are set and not accepted by the driver */
static int qede_flow_spec_validate_unused(struct qede_dev *edev,
					  struct ethtool_rx_flow_spec *fs)
{
	if (fs->flow_type & FLOW_MAC_EXT) {
		DP_INFO(edev, "Don't support MAC extensions\n");
		return -EOPNOTSUPP;
	}

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->h_ext.vlan_etype || fs->h_ext.vlan_tci)) {
		DP_INFO(edev, "Don't support vlan-based classification\n");
		return -EOPNOTSUPP;
	}

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->h_ext.data[0] || fs->h_ext.data[1])) {
		DP_INFO(edev, "Don't support user defined data\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

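/* Derive the device filtering mode from which tuple fields are populated:
 * a full 5-tuple, a destination L4 port only, a source IP only or a
 * destination IP only. Any other combination is rejected.
 */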
static int qede_set_v4_tuple_to_profile(struct qede_dev *edev,
					struct qede_arfs_tuple *t)
{
	/* Only a full 4-tuple, an L4 destination port, a source IP or a
	 * destination IP may be given as input.
	 */
	if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) {
		t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
	} else if (!t->src_port && t->dst_port &&
		   !t->src_ipv4 && !t->dst_ipv4) {
		t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
	} else if (!t->src_port && !t->dst_port &&
		   !t->dst_ipv4 && t->src_ipv4) {
		t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
	} else if (!t->src_port && !t->dst_port &&
		   t->dst_ipv4 && !t->src_ipv4) {
		t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
	} else {
		DP_INFO(edev, "Invalid N-tuple\n");
		return -EOPNOTSUPP;
	}

	t->ip_comp = qede_flow_spec_ipv4_cmp;
	t->build_hdr = qede_flow_build_ipv4_hdr;
	t->stringify = qede_flow_stringify_ipv4_hdr;

	return 0;
}

static int qede_set_v6_tuple_to_profile(struct qede_dev *edev,
					struct qede_arfs_tuple *t,
					struct in6_addr *zaddr)
{
	/* Only a full 4-tuple, an L4 destination port, a source IP or a
	 * destination IP may be given as input.
	 */
	if (t->src_port && t->dst_port &&
	    memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) &&
	    memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) {
		t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
	} else if (!t->src_port && t->dst_port &&
		   !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) &&
		   !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) {
		t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
	} else if (!t->src_port && !t->dst_port &&
		   !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) &&
		   memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
		t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
	} else if (!t->src_port && !t->dst_port &&
		   memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) &&
		   !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
		t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
	} else {
		DP_INFO(edev, "Invalid N-tuple\n");
		return -EOPNOTSUPP;
	}

	t->ip_comp = qede_flow_spec_ipv6_cmp;
	t->build_hdr = qede_flow_build_ipv6_hdr;

	return 0;
}

/* Look up a filter whose protocol, L4 ports and IP addresses match @t.
 * Must be called while the qede lock is held.
 */
static struct qede_arfs_fltr_node *
qede_flow_find_fltr(struct qede_dev *edev, struct qede_arfs_tuple *t)
{
	struct qede_arfs_fltr_node *fltr;
	struct hlist_node *temp;
	struct hlist_head *head;

	head = QEDE_ARFS_BUCKET_HEAD(edev, 0);

	hlist_for_each_entry_safe(fltr, temp, head, node) {
		if (fltr->tuple.ip_proto == t->ip_proto &&
		    fltr->tuple.src_port == t->src_port &&
		    fltr->tuple.dst_port == t->dst_port &&
		    t->ip_comp(&fltr->tuple, t))
			return fltr;
	}

	return NULL;
}

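/* Decode the ethtool ring_cookie into the filter destination: a drop mark,
 * a VF index (zero means the PF itself, non-zero is VF number plus one)
 * and an Rx queue id.
 */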
static void qede_flow_set_destination(struct qede_dev *edev,
				      struct qede_arfs_fltr_node *n,
				      struct ethtool_rx_flow_spec *fs)
{
	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
		n->b_is_drop = true;
		return;
	}

	n->vfid = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
	n->rxq_id = ethtool_get_flow_spec_ring(fs->ring_cookie);
	n->next_rxq_id = n->rxq_id;

	if (n->vfid)
		DP_VERBOSE(edev, QED_MSG_SP,
			   "Configuring N-tuple for VF 0x%02x\n", n->vfid - 1);
}

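/* Remove the flow filter whose sw_id matches @cookie: issue a delete
 * request towards the device, wait for it to complete and, on success,
 * free the filter node.
 */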
int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie)
{
	struct qede_arfs_fltr_node *fltr = NULL;
	int rc = -EPERM;

	__qede_lock(edev);
	if (!edev->arfs)
		goto unlock;

	fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
					 cookie);
	if (!fltr)
		goto unlock;

	qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id, false);

	rc = qede_poll_arfs_filter_config(edev, fltr);
	if (rc == 0)
		qede_dequeue_fltr_and_config_searcher(edev, fltr);

unlock:
	__qede_unlock(edev);
	return rc;
}

int qede_get_arfs_filter_count(struct qede_dev *edev)
{
	int count = 0;

	__qede_lock(edev);

	if (!edev->arfs)
		goto unlock;

	count = edev->arfs->filter_count;

unlock:
	__qede_unlock(edev);
	return count;
}

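/* Accept only actions the device can honour: DROP, or QUEUE with an
 * in-range Rx queue index (queue indices of VF-directed rules are not
 * range-checked here).
 */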
static int qede_parse_actions(struct qede_dev *edev,
			      struct flow_action *flow_action)
{
	const struct flow_action_entry *act;
	int i;

	if (!flow_action_has_entries(flow_action)) {
		DP_NOTICE(edev, "No actions received\n");
		return -EINVAL;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			break;
		case FLOW_ACTION_QUEUE:
			if (act->queue.vf)
				break;

			if (act->queue.index >= QEDE_RSS_COUNT(edev)) {
				DP_INFO(edev, "Queue out-of-bounds\n");
				return -EINVAL;
			}
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

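/* Copy the L4 ports from the rule into the tuple; only exact (fully
 * masked) port matches are supported.
 */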
static int
qede_flow_parse_ports(struct qede_dev *edev, struct flow_rule *rule,
		      struct qede_arfs_tuple *t)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		if ((match.key->src && match.mask->src != U16_MAX) ||
		    (match.key->dst && match.mask->dst != U16_MAX)) {
			DP_NOTICE(edev, "Don't support port masks\n");
			return -EINVAL;
		}

		t->src_port = match.key->src;
		t->dst_port = match.key->dst;
	}

	return 0;
}

static int
qede_flow_parse_v6_common(struct qede_dev *edev, struct flow_rule *rule,
			  struct qede_arfs_tuple *t)
{
	struct in6_addr zero_addr, addr;

	memset(&zero_addr, 0, sizeof(addr));
	memset(&addr, 0xff, sizeof(addr));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		if ((memcmp(&match.key->src, &zero_addr, sizeof(addr)) &&
		     memcmp(&match.mask->src, &addr, sizeof(addr))) ||
		    (memcmp(&match.key->dst, &zero_addr, sizeof(addr)) &&
		     memcmp(&match.mask->dst, &addr, sizeof(addr)))) {
			DP_NOTICE(edev,
				  "Don't support IPv6 address prefix/mask\n");
			return -EINVAL;
		}

		memcpy(&t->src_ipv6, &match.key->src, sizeof(addr));
		memcpy(&t->dst_ipv6, &match.key->dst, sizeof(addr));
	}

	if (qede_flow_parse_ports(edev, rule, t))
		return -EINVAL;

	return qede_set_v6_tuple_to_profile(edev, t, &zero_addr);
}

static int
qede_flow_parse_v4_common(struct qede_dev *edev, struct flow_rule *rule,
			  struct qede_arfs_tuple *t)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		if ((match.key->src && match.mask->src != U32_MAX) ||
		    (match.key->dst && match.mask->dst != U32_MAX)) {
			DP_NOTICE(edev, "Don't support IPv4 prefix/masks\n");
			return -EINVAL;
		}

		t->src_ipv4 = match.key->src;
		t->dst_ipv4 = match.key->dst;
	}

	if (qede_flow_parse_ports(edev, rule, t))
		return -EINVAL;

	return qede_set_v4_tuple_to_profile(edev, t);
}

static int
qede_flow_parse_tcp_v6(struct qede_dev *edev, struct flow_rule *rule,
		       struct qede_arfs_tuple *tuple)
{
	tuple->ip_proto = IPPROTO_TCP;
	tuple->eth_proto = htons(ETH_P_IPV6);

	return qede_flow_parse_v6_common(edev, rule, tuple);
}

static int
qede_flow_parse_tcp_v4(struct qede_dev *edev, struct flow_rule *rule,
		       struct qede_arfs_tuple *tuple)
{
	tuple->ip_proto = IPPROTO_TCP;
	tuple->eth_proto = htons(ETH_P_IP);

	return qede_flow_parse_v4_common(edev, rule, tuple);
}

static int
qede_flow_parse_udp_v6(struct qede_dev *edev, struct flow_rule *rule,
		       struct qede_arfs_tuple *tuple)
{
	tuple->ip_proto = IPPROTO_UDP;
	tuple->eth_proto = htons(ETH_P_IPV6);

	return qede_flow_parse_v6_common(edev, rule, tuple);
}

static int
qede_flow_parse_udp_v4(struct qede_dev *edev, struct flow_rule *rule,
		       struct qede_arfs_tuple *tuple)
{
	tuple->ip_proto = IPPROTO_UDP;
	tuple->eth_proto = htons(ETH_P_IP);

	return qede_flow_parse_v4_common(edev, rule, tuple);
}

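/* Translate a flow_rule (from tc flower, or from an ethtool flow_spec via
 * ethtool_rx_flow_rule_create()) into a qede_arfs_tuple. Only exact TCP or
 * UDP over IPv4/IPv6 matches are supported.
 */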
static int
qede_parse_flow_attr(struct qede_dev *edev, __be16 proto,
		     struct flow_rule *rule, struct qede_arfs_tuple *tuple)
{
	struct flow_dissector *dissector = rule->match.dissector;
	int rc = -EINVAL;
	u8 ip_proto = 0;

	memset(tuple, 0, sizeof(*tuple));

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
		DP_NOTICE(edev, "Unsupported key set: 0x%x\n",
			  dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (proto != htons(ETH_P_IP) &&
	    proto != htons(ETH_P_IPV6)) {
		DP_NOTICE(edev, "Unsupported proto=0x%x\n", proto);
		return -EPROTONOSUPPORT;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ip_proto = match.key->ip_proto;
	}

	if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IP))
		rc = qede_flow_parse_tcp_v4(edev, rule, tuple);
	else if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IPV6))
		rc = qede_flow_parse_tcp_v6(edev, rule, tuple);
	else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IP))
		rc = qede_flow_parse_udp_v4(edev, rule, tuple);
	else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IPV6))
		rc = qede_flow_parse_udp_v6(edev, rule, tuple);
	else
		DP_NOTICE(edev, "Invalid protocol request\n");

	return rc;
}

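/* Entry point for tc flower offload, e.g. (hypothetical interface name):
 *
 *   tc filter add dev p5p1 ingress protocol ip flower \
 *       ip_proto tcp dst_port 4444 action drop
 */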
int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
			    struct flow_cls_offload *f)
{
	struct qede_arfs_fltr_node *n;
	int min_hlen, rc = -EINVAL;
	struct qede_arfs_tuple t;

	__qede_lock(edev);

	if (!edev->arfs) {
		rc = -EPERM;
		goto unlock;
	}

	/* Parse the flower attribute and prepare the filter tuple */
	if (qede_parse_flow_attr(edev, proto, f->rule, &t))
		goto unlock;

	/* Validate profile mode and number of filters */
	if ((edev->arfs->filter_count && edev->arfs->mode != t.mode) ||
	    edev->arfs->filter_count == QEDE_RFS_MAX_FLTR) {
		DP_NOTICE(edev,
			  "Filter configuration invalidated, filter mode=0x%x, configured mode=0x%x, filter count=0x%x\n",
			  t.mode, edev->arfs->mode, edev->arfs->filter_count);
		goto unlock;
	}

	/* Validate the tc actions */
	if (qede_parse_actions(edev, &f->rule->action))
		goto unlock;

	if (qede_flow_find_fltr(edev, &t)) {
		rc = -EEXIST;
		goto unlock;
	}

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n) {
		rc = -ENOMEM;
		goto unlock;
	}

	min_hlen = qede_flow_get_min_header_size(&t);

	n->data = kzalloc(min_hlen, GFP_KERNEL);
	if (!n->data) {
		kfree(n);
		rc = -ENOMEM;
		goto unlock;
	}

	memcpy(&n->tuple, &t, sizeof(n->tuple));

	n->buf_len = min_hlen;
	n->b_is_drop = true;
	n->sw_id = f->cookie;

	n->tuple.build_hdr(&n->tuple, n->data);

	rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
	if (rc)
		goto unlock;

	qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
	rc = qede_poll_arfs_filter_config(edev, n);

unlock:
	__qede_unlock(edev);
	return rc;
}

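/* Sanity-check an ethtool flow_spec before it becomes a filter: the
 * location must be in range and free, the resulting filtering mode must
 * agree with any filters already configured, and the actions must be
 * supported.
 */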
static int qede_flow_spec_validate(struct qede_dev *edev,
				   struct flow_action *flow_action,
				   struct qede_arfs_tuple *t,
				   __u32 location)
{
	if (location >= QEDE_RFS_MAX_FLTR) {
		DP_INFO(edev, "Location out-of-bounds\n");
		return -EINVAL;
	}

	/* Check location isn't already in use */
	if (test_bit(location, edev->arfs->arfs_fltr_bmap)) {
		DP_INFO(edev, "Location already in use\n");
		return -EINVAL;
	}

	/* Check if the filtering-mode could support the filter */
	if (edev->arfs->filter_count &&
	    edev->arfs->mode != t->mode) {
		DP_INFO(edev,
			"flow_spec would require filtering mode %08x, but %08x is configured\n",
			t->mode, edev->arfs->mode);
		return -EINVAL;
	}

	if (qede_parse_actions(edev, flow_action))
		return -EINVAL;

	return 0;
}

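/* Convert an ethtool_rx_flow_spec into a qede_arfs_tuple by building a
 * transient flow_rule with ethtool_rx_flow_rule_create() and reusing the
 * flower parsing path, then validate the result.
 */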
static int qede_flow_spec_to_rule(struct qede_dev *edev,
				  struct qede_arfs_tuple *t,
				  struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_rx_flow_spec_input input = {};
	struct ethtool_rx_flow_rule *flow;
	__be16 proto;
	int err = 0;

	if (qede_flow_spec_validate_unused(edev, fs))
		return -EOPNOTSUPP;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		proto = htons(ETH_P_IP);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		proto = htons(ETH_P_IPV6);
		break;
	default:
		DP_VERBOSE(edev, NETIF_MSG_IFUP,
			   "Can't support flow of type %08x\n", fs->flow_type);
		return -EOPNOTSUPP;
	}

	input.fs = fs;
	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (qede_parse_flow_attr(edev, proto, flow->rule, t)) {
		err = -EINVAL;
		goto err_out;
	}

	/* Make sure location is valid and filter isn't already set */
	err = qede_flow_spec_validate(edev, &flow->rule->action, t,
				      fs->location);
err_out:
	ethtool_rx_flow_rule_destroy(flow);
	return err;
}

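/* Entry point for ethtool n-tuple (ethtool -N/-U) rules, e.g.
 * (hypothetical interface name):
 *
 *   ethtool -N p5p1 flow-type tcp4 dst-port 4444 action 1 loc 0
 */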
int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
{
	struct ethtool_rx_flow_spec *fsp = &info->fs;
	struct qede_arfs_fltr_node *n;
	struct qede_arfs_tuple t;
	int min_hlen, rc;

	__qede_lock(edev);

	if (!edev->arfs) {
		rc = -EPERM;
		goto unlock;
	}

	/* Translate the flow specification into something fitting our DB */
	rc = qede_flow_spec_to_rule(edev, &t, fsp);
	if (rc)
		goto unlock;

	if (qede_flow_find_fltr(edev, &t)) {
		rc = -EINVAL;
		goto unlock;
	}

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n) {
		rc = -ENOMEM;
		goto unlock;
	}

	min_hlen = qede_flow_get_min_header_size(&t);
	n->data = kzalloc(min_hlen, GFP_KERNEL);
	if (!n->data) {
		kfree(n);
		rc = -ENOMEM;
		goto unlock;
	}

	n->sw_id = fsp->location;
	set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap);
	n->buf_len = min_hlen;

	memcpy(&n->tuple, &t, sizeof(n->tuple));

	qede_flow_set_destination(edev, n, fsp);

	/* Build a minimal header according to the flow */
	n->tuple.build_hdr(&n->tuple, n->data);

	rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
	if (rc)
		goto unlock;

	qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
	rc = qede_poll_arfs_filter_config(edev, n);
unlock:
	__qede_unlock(edev);

	return rc;
}
