linux/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/ipv6.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_tcb.h"
#include "t4_values.h"
#include "clip_tbl.h"
#include "l2t.h"
#include "smt.h"
#include "t4fw_api.h"
#include "cxgb4_filter.h"

static inline bool is_field_set(u32 val, u32 mask)
{
        return val || mask;
}

static inline bool unsupported(u32 conf, u32 conf_mask, u32 val, u32 mask)
{
        return !(conf & conf_mask) && is_field_set(val, mask);
}
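
/* Illustrative example (hypothetical fconf value, not taken from real
 * hardware): if the compressed filter configuration omits ETHERTYPE_F,
 * any spec that tries to match on the Ethertype field is rejected:
 *
 *	u32 fconf = PORT_F | PROTOCOL_F;			// no ETHERTYPE_F
 *
 *	unsupported(fconf, ETHERTYPE_F, 0x0800, 0xffff);	// true: reject
 *	unsupported(fconf, PROTOCOL_F, IPPROTO_TCP, 0xff);	// false: ok
 */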

static int set_tcb_field(struct adapter *adap, struct filter_entry *f,
                         unsigned int ftid, u16 word, u64 mask, u64 val,
                         int no_reply)
{
        struct cpl_set_tcb_field *req;
        struct sk_buff *skb;

        skb = alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        req = (struct cpl_set_tcb_field *)__skb_put_zero(skb, sizeof(*req));
        INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, ftid);
        req->reply_ctrl = htons(REPLY_CHAN_V(0) |
                                QUEUENO_V(adap->sge.fw_evtq.abs_id) |
                                NO_REPLY_V(no_reply));
        req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(ftid));
        req->mask = cpu_to_be64(mask);
        req->val = cpu_to_be64(val);
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
        t4_ofld_send(adap, skb);
        return 0;
}

/* Set one of the t_flags bits in the TCB. */
static int set_tcb_tflag(struct adapter *adap, struct filter_entry *f,
                         unsigned int ftid, unsigned int bit_pos,
                         unsigned int val, int no_reply)
{
        return set_tcb_field(adap, f, ftid, TCB_T_FLAGS_W, 1ULL << bit_pos,
                             (unsigned long long)val << bit_pos, no_reply);
}
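
/* Usage sketch: configure_filter_smac() below uses this to raise the CWR
 * control bit in a filter's TCB without waiting for a firmware reply:
 *
 *	set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1);
 *
 * which becomes a CPL_SET_TCB_FIELD on word TCB_T_FLAGS_W with
 * mask == val == 1ULL << TF_CCTRL_CWR_S.
 */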

static void mk_abort_req_ulp(struct cpl_abort_req *abort_req, unsigned int tid)
{
        struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
        struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

        txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
        txpkt->len = htonl(DIV_ROUND_UP(sizeof(*abort_req), 16));
        sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
        sc->len = htonl(sizeof(*abort_req) - sizeof(struct work_request_hdr));
        OPCODE_TID(abort_req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
        abort_req->rsvd0 = htonl(0);
        abort_req->rsvd1 = 0;
        abort_req->cmd = CPL_ABORT_NO_RST;
}

static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl, unsigned int tid)
{
        struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
        struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

        txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
        txpkt->len = htonl(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
        sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
        sc->len = htonl(sizeof(*abort_rpl) - sizeof(struct work_request_hdr));
        OPCODE_TID(abort_rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
        abort_rpl->rsvd0 = htonl(0);
        abort_rpl->rsvd1 = 0;
        abort_rpl->cmd = CPL_ABORT_NO_RST;
}

static void mk_set_tcb_ulp(struct filter_entry *f,
                           struct cpl_set_tcb_field *req,
                           unsigned int word, u64 mask, u64 val,
                           u8 cookie, int no_reply)
{
        struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
        struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

        txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
        txpkt->len = htonl(DIV_ROUND_UP(sizeof(*req), 16));
        sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
        sc->len = htonl(sizeof(*req) - sizeof(struct work_request_hdr));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
        req->reply_ctrl = htons(NO_REPLY_V(no_reply) | REPLY_CHAN_V(0) |
                                QUEUENO_V(0));
        req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(cookie));
        req->mask = cpu_to_be64(mask);
        req->val = cpu_to_be64(val);
        sc = (struct ulptx_idata *)(req + 1);
        sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
        sc->len = htonl(0);
}

static int configure_filter_smac(struct adapter *adap, struct filter_entry *f)
{
        int err;

        /* do a set-tcb for smac-sel and CWR bit. */
        err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1);
        if (err)
                goto smac_err;

        err = set_tcb_field(adap, f, f->tid, TCB_SMAC_SEL_W,
                            TCB_SMAC_SEL_V(TCB_SMAC_SEL_M),
                            TCB_SMAC_SEL_V(f->smt->idx), 1);
        if (!err)
                return 0;

smac_err:
        dev_err(adap->pdev_dev, "filter %u smac config failed with error %d\n",
                f->tid, err);
        return err;
}

static void set_nat_params(struct adapter *adap, struct filter_entry *f,
                           unsigned int tid, bool dip, bool sip, bool dp,
                           bool sp)
{
        if (dip) {
                if (f->fs.type) {
                        set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W,
                                      WORD_MASK, f->fs.nat_lip[15] |
                                      f->fs.nat_lip[14] << 8 |
                                      f->fs.nat_lip[13] << 16 |
                                      f->fs.nat_lip[12] << 24, 1);

                        set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 1,
                                      WORD_MASK, f->fs.nat_lip[11] |
                                      f->fs.nat_lip[10] << 8 |
                                      f->fs.nat_lip[9] << 16 |
                                      f->fs.nat_lip[8] << 24, 1);

                        set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 2,
                                      WORD_MASK, f->fs.nat_lip[7] |
                                      f->fs.nat_lip[6] << 8 |
                                      f->fs.nat_lip[5] << 16 |
                                      f->fs.nat_lip[4] << 24, 1);

                        set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 3,
                                      WORD_MASK, f->fs.nat_lip[3] |
                                      f->fs.nat_lip[2] << 8 |
                                      f->fs.nat_lip[1] << 16 |
                                      f->fs.nat_lip[0] << 24, 1);
                } else {
                        set_tcb_field(adap, f, tid, TCB_RX_FRAG3_LEN_RAW_W,
                                      WORD_MASK, f->fs.nat_lip[3] |
                                      f->fs.nat_lip[2] << 8 |
                                      f->fs.nat_lip[1] << 16 |
                                      f->fs.nat_lip[0] << 24, 1);
                }
        }

        if (sip) {
                if (f->fs.type) {
                        set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W,
                                      WORD_MASK, f->fs.nat_fip[15] |
                                      f->fs.nat_fip[14] << 8 |
                                      f->fs.nat_fip[13] << 16 |
                                      f->fs.nat_fip[12] << 24, 1);

                        set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 1,
                                      WORD_MASK, f->fs.nat_fip[11] |
                                      f->fs.nat_fip[10] << 8 |
                                      f->fs.nat_fip[9] << 16 |
                                      f->fs.nat_fip[8] << 24, 1);

                        set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 2,
                                      WORD_MASK, f->fs.nat_fip[7] |
                                      f->fs.nat_fip[6] << 8 |
                                      f->fs.nat_fip[5] << 16 |
                                      f->fs.nat_fip[4] << 24, 1);

                        set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 3,
                                      WORD_MASK, f->fs.nat_fip[3] |
                                      f->fs.nat_fip[2] << 8 |
                                      f->fs.nat_fip[1] << 16 |
                                      f->fs.nat_fip[0] << 24, 1);

                } else {
                        set_tcb_field(adap, f, tid,
                                      TCB_RX_FRAG3_START_IDX_OFFSET_RAW_W,
                                      WORD_MASK, f->fs.nat_fip[3] |
                                      f->fs.nat_fip[2] << 8 |
                                      f->fs.nat_fip[1] << 16 |
                                      f->fs.nat_fip[0] << 24, 1);
                }
        }

        set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK,
                      (dp ? f->fs.nat_lport : 0) |
                      (sp ? f->fs.nat_fport << 16 : 0), 1);
}
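
/* Illustrative calls (hypothetical filters): rewriting everything versus
 * rewriting only the local (lip/lport) side:
 *
 *	set_nat_params(adap, f, f->tid, true, true, true, true);
 *	set_nat_params(adap, f, f->tid, true, false, true, false);
 */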

/* Validate filter spec against configuration done on the card. */
static int validate_filter(struct net_device *dev,
                           struct ch_filter_specification *fs)
{
        struct adapter *adapter = netdev2adap(dev);
        u32 fconf, iconf;

        /* Check for unconfigured fields being used. */
        iconf = adapter->params.tp.ingress_config;
        fconf = fs->hash ? adapter->params.tp.filter_mask :
                           adapter->params.tp.vlan_pri_map;

        if (unsupported(fconf, FCOE_F, fs->val.fcoe, fs->mask.fcoe) ||
            unsupported(fconf, PORT_F, fs->val.iport, fs->mask.iport) ||
            unsupported(fconf, TOS_F, fs->val.tos, fs->mask.tos) ||
            unsupported(fconf, ETHERTYPE_F, fs->val.ethtype,
                        fs->mask.ethtype) ||
            unsupported(fconf, MACMATCH_F, fs->val.macidx, fs->mask.macidx) ||
            unsupported(fconf, MPSHITTYPE_F, fs->val.matchtype,
                        fs->mask.matchtype) ||
            unsupported(fconf, FRAGMENTATION_F, fs->val.frag, fs->mask.frag) ||
            unsupported(fconf, PROTOCOL_F, fs->val.proto, fs->mask.proto) ||
            unsupported(fconf, VNIC_ID_F, fs->val.pfvf_vld,
                        fs->mask.pfvf_vld) ||
            unsupported(fconf, VNIC_ID_F, fs->val.ovlan_vld,
                        fs->mask.ovlan_vld) ||
            unsupported(fconf, VNIC_ID_F, fs->val.encap_vld,
                        fs->mask.encap_vld) ||
            unsupported(fconf, VLAN_F, fs->val.ivlan_vld, fs->mask.ivlan_vld))
                return -EOPNOTSUPP;

        /* T4 inconveniently uses the same FT_VNIC_ID_W bits for both the Outer
         * VLAN Tag and PF/VF/VFvld fields based on VNIC_F being set
         * in TP_INGRESS_CONFIG.  Hence the somewhat crazy checks
         * below.  Additionally, since the T4 firmware interface also
         * carries that overlap, we need to translate any PF/VF
         * specification into that internal format below.
         */
        if ((is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) &&
             is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld)) ||
            (is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) &&
             is_field_set(fs->val.encap_vld, fs->mask.encap_vld)) ||
            (is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) &&
             is_field_set(fs->val.encap_vld, fs->mask.encap_vld)))
                return -EOPNOTSUPP;
        if (unsupported(iconf, VNIC_F, fs->val.pfvf_vld, fs->mask.pfvf_vld) ||
            (is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) &&
             (iconf & VNIC_F)))
                return -EOPNOTSUPP;
        if (fs->val.pf > 0x7 || fs->val.vf > 0x7f)
                return -ERANGE;
        fs->mask.pf &= 0x7;
        fs->mask.vf &= 0x7f;

        /* If the user is requesting that the filter action loop
         * matching packets back out one of our ports, make sure that
         * the egress port is in range.
         */
        if (fs->action == FILTER_SWITCH &&
            fs->eport >= adapter->params.nports)
                return -ERANGE;

        /* Don't allow various trivially obvious bogus out-of-range values... */
        if (fs->val.iport >= adapter->params.nports)
                return -ERANGE;

        /* T4 doesn't support removing VLAN Tags for loop back filters. */
        if (is_t4(adapter->params.chip) &&
            fs->action == FILTER_SWITCH &&
            (fs->newvlan == VLAN_REMOVE ||
             fs->newvlan == VLAN_REWRITE))
                return -EOPNOTSUPP;

        if (fs->val.encap_vld &&
            CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
                return -EOPNOTSUPP;
        return 0;
}

static int get_filter_steerq(struct net_device *dev,
                             struct ch_filter_specification *fs)
{
        struct adapter *adapter = netdev2adap(dev);
        int iq;

        /* If the user has requested steering matching Ingress Packets
         * to a specific Queue Set, we need to make sure it's in range
         * for the port and map that into the Absolute Queue ID of the
         * Queue Set's Response Queue.
         */
        if (!fs->dirsteer) {
                if (fs->iq)
                        return -EINVAL;
                iq = 0;
        } else {
                struct port_info *pi = netdev_priv(dev);

                /* If the iq id is greater than the number of qsets,
                 * then assume it is an absolute qid.
                 */
                if (fs->iq < pi->nqsets)
                        iq = adapter->sge.ethrxq[pi->first_qset +
                                                 fs->iq].rspq.abs_id;
                else
                        iq = fs->iq;
        }

        return iq;
}
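
/* Example (assumed layout): on a port with first_qset = 8 and nqsets = 4,
 * a spec with dirsteer = 1 and iq = 2 maps to the absolute ID of
 * ethrxq[10]'s response queue, while iq = 100 (>= nqsets) is passed
 * through unchanged as an absolute queue ID.
 */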

static int get_filter_count(struct adapter *adapter, unsigned int fidx,
                            u64 *pkts, u64 *bytes, bool hash)
{
        unsigned int tcb_base, tcbaddr;
        unsigned int word_offset;
        struct filter_entry *f;
        __be64 be64_byte_count;
        int ret;

        tcb_base = t4_read_reg(adapter, TP_CMM_TCB_BASE_A);
        if (is_hashfilter(adapter) && hash) {
                if (fidx < adapter->tids.ntids) {
                        f = adapter->tids.tid_tab[fidx];
                        if (!f)
                                return -EINVAL;
                } else {
                        return -E2BIG;
                }
        } else {
                if ((fidx != (adapter->tids.nftids +
                              adapter->tids.nsftids - 1)) &&
                    fidx >= adapter->tids.nftids)
                        return -E2BIG;

                f = &adapter->tids.ftid_tab[fidx];
                if (!f->valid)
                        return -EINVAL;
        }
        tcbaddr = tcb_base + f->tid * TCB_SIZE;

        spin_lock(&adapter->win0_lock);
        if (is_t4(adapter->params.chip)) {
                __be64 be64_count;

                /* T4 doesn't maintain byte counts in hw */
                *bytes = 0;

                /* Get pkts */
                word_offset = 4;
                ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
                                   tcbaddr + (word_offset * sizeof(__be32)),
                                   sizeof(be64_count),
                                   (__be32 *)&be64_count,
                                   T4_MEMORY_READ);
                if (ret < 0)
                        goto out;
                *pkts = be64_to_cpu(be64_count);
        } else {
                __be32 be32_count;

                /* Get bytes */
                word_offset = 4;
                ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
                                   tcbaddr + (word_offset * sizeof(__be32)),
                                   sizeof(be64_byte_count),
                                   &be64_byte_count,
                                   T4_MEMORY_READ);
                if (ret < 0)
                        goto out;
                *bytes = be64_to_cpu(be64_byte_count);

                /* Get pkts */
                word_offset = 6;
                ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
                                   tcbaddr + (word_offset * sizeof(__be32)),
                                   sizeof(be32_count),
                                   &be32_count,
                                   T4_MEMORY_READ);
                if (ret < 0)
                        goto out;
                *pkts = (u64)be32_to_cpu(be32_count);
        }

out:
        spin_unlock(&adapter->win0_lock);
        return ret;
}

int cxgb4_get_filter_counters(struct net_device *dev, unsigned int fidx,
                              u64 *hitcnt, u64 *bytecnt, bool hash)
{
        struct adapter *adapter = netdev2adap(dev);

        return get_filter_count(adapter, fidx, hitcnt, bytecnt, hash);
}

int cxgb4_get_free_ftid(struct net_device *dev, int family)
{
        struct adapter *adap = netdev2adap(dev);
        struct tid_info *t = &adap->tids;
        int ftid;

        spin_lock_bh(&t->ftid_lock);
        if (family == PF_INET) {
                ftid = find_first_zero_bit(t->ftid_bmap, t->nftids);
                if (ftid >= t->nftids)
                        ftid = -1;
        } else {
                if (is_t6(adap->params.chip)) {
                        ftid = bitmap_find_free_region(t->ftid_bmap,
                                                       t->nftids, 1);
                        if (ftid < 0)
                                goto out_unlock;

                        /* this is only a lookup, keep the found region
                         * unallocated
                         */
                        bitmap_release_region(t->ftid_bmap, ftid, 1);
                } else {
                        ftid = bitmap_find_free_region(t->ftid_bmap,
                                                       t->nftids, 2);
                        if (ftid < 0)
                                goto out_unlock;

                        bitmap_release_region(t->ftid_bmap, ftid, 2);
                }
        }
out_unlock:
        spin_unlock_bh(&t->ftid_lock);
        return ftid;
}
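
/* Usage sketch (hypothetical caller): probe for a free index before
 * building a spec; note that the lookup does not reserve the slot (the
 * region found above is released again), so the slot is only claimed
 * later via cxgb4_set_ftid():
 *
 *	int ftid = cxgb4_get_free_ftid(dev, PF_INET6);
 *
 *	if (ftid < 0)
 *		return -ENOMEM;		// no free filter index
 */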

static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family,
                          unsigned int chip_ver)
{
        spin_lock_bh(&t->ftid_lock);

        if (test_bit(fidx, t->ftid_bmap)) {
                spin_unlock_bh(&t->ftid_lock);
                return -EBUSY;
        }

        if (family == PF_INET) {
                __set_bit(fidx, t->ftid_bmap);
        } else {
                if (chip_ver < CHELSIO_T6)
                        bitmap_allocate_region(t->ftid_bmap, fidx, 2);
                else
                        bitmap_allocate_region(t->ftid_bmap, fidx, 1);
        }

        spin_unlock_bh(&t->ftid_lock);
        return 0;
}

static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family,
                             unsigned int chip_ver)
{
        spin_lock_bh(&t->ftid_lock);
        if (family == PF_INET) {
                __clear_bit(fidx, t->ftid_bmap);
        } else {
                if (chip_ver < CHELSIO_T6)
                        bitmap_release_region(t->ftid_bmap, fidx, 2);
                else
                        bitmap_release_region(t->ftid_bmap, fidx, 1);
        }
        spin_unlock_bh(&t->ftid_lock);
}

/* Delete the filter at a specified index. */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct fw_filter_wr *fwr;
        struct sk_buff *skb;
        unsigned int len;

        len = sizeof(*fwr);

        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        fwr = __skb_put(skb, len);
        t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);

        /* Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        t4_mgmt_tx(adapter, skb);
        return 0;
}

/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
int set_filter_wr(struct adapter *adapter, int fidx)
{
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct fw_filter2_wr *fwr;
        struct sk_buff *skb;

        skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        /* If the new filter requires loopback Destination MAC and/or VLAN
         * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
         * the filter.
         */
        if (f->fs.newdmac || f->fs.newvlan) {
                /* allocate L2T entry for new filter */
                f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
                                                f->fs.eport, f->fs.dmac);
                if (!f->l2t) {
                        kfree_skb(skb);
                        return -ENOMEM;
                }
        }

        /* If the new filter requires loopback Source MAC rewriting then
         * we need to allocate a SMT entry for the filter.
         */
        if (f->fs.newsmac) {
                f->smt = cxgb4_smt_alloc_switching(f->dev, f->fs.smac);
                if (!f->smt) {
                        if (f->l2t) {
                                cxgb4_l2t_release(f->l2t);
                                f->l2t = NULL;
                        }
                        kfree_skb(skb);
                        return -ENOMEM;
                }
        }

        fwr = __skb_put_zero(skb, sizeof(*fwr));

        /* It would be nice to put most of the following in t4_hw.c but most
         * of the work is translating the cxgbtool ch_filter_specification
         * into the Work Request and the definition of that structure is
         * currently in cxgbtool.h which isn't appropriate to pull into the
         * common code.  We may eventually try to come up with a more neutral
         * filter specification structure but for now it's easiest to simply
         * put this fairly direct code in line ...
         */
        if (adapter->params.filter2_wr_support)
                fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER2_WR));
        else
                fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
        fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr) / 16));
        fwr->tid_to_iq =
                htonl(FW_FILTER_WR_TID_V(f->tid) |
                      FW_FILTER_WR_RQTYPE_V(f->fs.type) |
                      FW_FILTER_WR_NOREPLY_V(0) |
                      FW_FILTER_WR_IQ_V(f->fs.iq));
        fwr->del_filter_to_l2tix =
                htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
                      FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
                      FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
                      FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
                      FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
                      FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
                      FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
                      FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
                                             f->fs.newvlan == VLAN_REWRITE) |
                      FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
                                            f->fs.newvlan == VLAN_REWRITE) |
                      FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
                      FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
                      FW_FILTER_WR_PRIO_V(f->fs.prio) |
                      FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
        fwr->ethtype = htons(f->fs.val.ethtype);
        fwr->ethtypem = htons(f->fs.mask.ethtype);
        fwr->frag_to_ovlan_vldm =
                (FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
                 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
                 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
                 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
                 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
                 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
        fwr->smac_sel = 0;
        fwr->rx_chan_rx_rpl_iq =
                htons(FW_FILTER_WR_RX_CHAN_V(0) |
                      FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
        fwr->maci_to_matchtypem =
                htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
                      FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
                      FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
                      FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
                      FW_FILTER_WR_PORT_V(f->fs.val.iport) |
                      FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
                      FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
                      FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
        fwr->ptcl = f->fs.val.proto;
        fwr->ptclm = f->fs.mask.proto;
        fwr->ttyp = f->fs.val.tos;
        fwr->ttypm = f->fs.mask.tos;
        fwr->ivlan = htons(f->fs.val.ivlan);
        fwr->ivlanm = htons(f->fs.mask.ivlan);
        fwr->ovlan = htons(f->fs.val.ovlan);
        fwr->ovlanm = htons(f->fs.mask.ovlan);
        memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
        memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
        memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
        memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
        fwr->lp = htons(f->fs.val.lport);
        fwr->lpm = htons(f->fs.mask.lport);
        fwr->fp = htons(f->fs.val.fport);
        fwr->fpm = htons(f->fs.mask.fport);

        if (adapter->params.filter2_wr_support) {
                fwr->natmode_to_ulp_type =
                        FW_FILTER2_WR_ULP_TYPE_V(f->fs.nat_mode ?
                                                 ULP_MODE_TCPDDP :
                                                 ULP_MODE_NONE) |
                        FW_FILTER2_WR_NATMODE_V(f->fs.nat_mode);
                memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
                memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
                fwr->newlport = htons(f->fs.nat_lport);
                fwr->newfport = htons(f->fs.nat_fport);
        }

        /* Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
        t4_ofld_send(adapter, skb);
        return 0;
}

/* Return an error number if the indicated filter isn't writable ... */
int writable_filter(struct filter_entry *f)
{
        if (f->locked)
                return -EPERM;
        if (f->pending)
                return -EBUSY;

        return 0;
}
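
/* Typical guard (as used by delete_filter() below): operations that
 * modify a filter check writability first and propagate the error:
 *
 *	ret = writable_filter(f);
 *	if (ret)
 *		return ret;	// -EPERM if locked, -EBUSY if pending
 */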

/* Delete the filter at the specified index (if valid).  This checks for all
 * the common problems with doing this, like the filter being locked or
 * currently pending in another operation.
 */
int delete_filter(struct adapter *adapter, unsigned int fidx)
{
        struct filter_entry *f;
        int ret;

        if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
                return -EINVAL;

        f = &adapter->tids.ftid_tab[fidx];
        ret = writable_filter(f);
        if (ret)
                return ret;
        if (f->valid)
                return del_filter_wr(adapter, fidx);

        return 0;
}

/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
void clear_filter(struct adapter *adap, struct filter_entry *f)
{
        struct port_info *pi = netdev_priv(f->dev);

        /* If the new or old filter has loopback rewriting rules then we'll
         * need to free any existing L2T, SMT, CLIP entries of the filter
         * rule.
         */
        if (f->l2t)
                cxgb4_l2t_release(f->l2t);

        if (f->smt)
                cxgb4_smt_release(f->smt);

        if (f->fs.val.encap_vld && f->fs.val.ovlan_vld)
                t4_free_encap_mac_filt(adap, pi->viid,
                                       f->fs.val.ovlan & 0x1ff, 0);

        if ((f->fs.hash || is_t6(adap->params.chip)) && f->fs.type)
                cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);

        /* The zeroing of the filter rule below clears the filter valid,
         * pending, locked flags, l2t pointer, etc. so it's all we need for
         * this operation.
         */
        memset(f, 0, sizeof(*f));
}

void clear_all_filters(struct adapter *adapter)
{
        struct net_device *dev = adapter->port[0];
        unsigned int i;

        if (adapter->tids.ftid_tab) {
                struct filter_entry *f = &adapter->tids.ftid_tab[0];
                unsigned int max_ftid = adapter->tids.nftids +
                                        adapter->tids.nsftids;
                /* Clear all TCAM filters */
                for (i = 0; i < max_ftid; i++, f++)
                        if (f->valid || f->pending)
                                cxgb4_del_filter(dev, i, &f->fs);
        }

        /* Clear all hash filters */
        if (is_hashfilter(adapter) && adapter->tids.tid_tab) {
                struct filter_entry *f;
                unsigned int sb;

                for (i = adapter->tids.hash_base;
                     i <= adapter->tids.ntids; i++) {
                        f = (struct filter_entry *)
                                adapter->tids.tid_tab[i];

                        if (f && (f->valid || f->pending))
                                cxgb4_del_filter(dev, i, &f->fs);
                }

                sb = t4_read_reg(adapter, LE_DB_SRVR_START_INDEX_A);
                for (i = 0; i < sb; i++) {
                        f = (struct filter_entry *)adapter->tids.tid_tab[i];

                        if (f && (f->valid || f->pending))
                                cxgb4_del_filter(dev, i, &f->fs);
                }
        }
}

/* Fill up default masks for set match fields. */
static void fill_default_mask(struct ch_filter_specification *fs)
{
        unsigned int lip = 0, lip_mask = 0;
        unsigned int fip = 0, fip_mask = 0;
        unsigned int i;

        if (fs->val.iport && !fs->mask.iport)
                fs->mask.iport |= ~0;
        if (fs->val.fcoe && !fs->mask.fcoe)
                fs->mask.fcoe |= ~0;
        if (fs->val.matchtype && !fs->mask.matchtype)
                fs->mask.matchtype |= ~0;
        if (fs->val.macidx && !fs->mask.macidx)
                fs->mask.macidx |= ~0;
        if (fs->val.ethtype && !fs->mask.ethtype)
                fs->mask.ethtype |= ~0;
        if (fs->val.ivlan && !fs->mask.ivlan)
                fs->mask.ivlan |= ~0;
        if (fs->val.ovlan && !fs->mask.ovlan)
                fs->mask.ovlan |= ~0;
        if (fs->val.frag && !fs->mask.frag)
                fs->mask.frag |= ~0;
        if (fs->val.tos && !fs->mask.tos)
                fs->mask.tos |= ~0;
        if (fs->val.proto && !fs->mask.proto)
                fs->mask.proto |= ~0;

        for (i = 0; i < ARRAY_SIZE(fs->val.lip); i++) {
                lip |= fs->val.lip[i];
                lip_mask |= fs->mask.lip[i];
                fip |= fs->val.fip[i];
                fip_mask |= fs->mask.fip[i];
        }

        if (lip && !lip_mask)
                memset(fs->mask.lip, ~0, sizeof(fs->mask.lip));

        if (fip && !fip_mask)
                memset(fs->mask.fip, ~0, sizeof(fs->mask.fip));

        if (fs->val.lport && !fs->mask.lport)
                fs->mask.lport = ~0;
        if (fs->val.fport && !fs->mask.fport)
                fs->mask.fport = ~0;
}
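
/* Example (hypothetical spec): a caller that only sets match values gets
 * exact-match masks filled in:
 *
 *	struct ch_filter_specification fs = { 0 };
 *
 *	fs.val.lport = 80;
 *	fill_default_mask(&fs);
 *	// fs.mask.lport is now ~0 truncated to the field, i.e. 0xffff
 */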

static bool is_addr_all_mask(u8 *ipmask, int family)
{
        if (family == AF_INET) {
                struct in_addr *addr;

                addr = (struct in_addr *)ipmask;
                if (addr->s_addr == 0xffffffff)
                        return true;
        } else if (family == AF_INET6) {
                struct in6_addr *addr6;

                addr6 = (struct in6_addr *)ipmask;
                if (addr6->s6_addr32[0] == 0xffffffff &&
                    addr6->s6_addr32[1] == 0xffffffff &&
                    addr6->s6_addr32[2] == 0xffffffff &&
                    addr6->s6_addr32[3] == 0xffffffff)
                        return true;
        }
        return false;
}

static bool is_inaddr_any(u8 *ip, int family)
{
        int addr_type;

        if (family == AF_INET) {
                struct in_addr *addr;

                addr = (struct in_addr *)ip;
                if (addr->s_addr == htonl(INADDR_ANY))
                        return true;
        } else if (family == AF_INET6) {
                struct in6_addr *addr6;

                addr6 = (struct in6_addr *)ip;
                addr_type = ipv6_addr_type((const struct in6_addr *)addr6);
                if (addr_type == IPV6_ADDR_ANY)
                        return true;
        }
        return false;
}

bool is_filter_exact_match(struct adapter *adap,
                           struct ch_filter_specification *fs)
{
        struct tp_params *tp = &adap->params.tp;
        u64 hash_filter_mask = tp->hash_filter_mask;
        u64 ntuple_mask = 0;

        if (!is_hashfilter(adap))
                return false;

        /* Keep tunnel VNI match disabled for hash-filters for now */
        if (fs->mask.encap_vld)
                return false;

        if (fs->type) {
                if (is_inaddr_any(fs->val.fip, AF_INET6) ||
                    !is_addr_all_mask(fs->mask.fip, AF_INET6))
                        return false;

                if (is_inaddr_any(fs->val.lip, AF_INET6) ||
                    !is_addr_all_mask(fs->mask.lip, AF_INET6))
                        return false;
        } else {
                if (is_inaddr_any(fs->val.fip, AF_INET) ||
                    !is_addr_all_mask(fs->mask.fip, AF_INET))
                        return false;

                if (is_inaddr_any(fs->val.lip, AF_INET) ||
                    !is_addr_all_mask(fs->mask.lip, AF_INET))
                        return false;
        }

        if (!fs->val.lport || fs->mask.lport != 0xffff)
                return false;

        if (!fs->val.fport || fs->mask.fport != 0xffff)
                return false;

        /* calculate tuple mask and compare with mask configured in hw */
        if (tp->fcoe_shift >= 0)
                ntuple_mask |= (u64)fs->mask.fcoe << tp->fcoe_shift;

        if (tp->port_shift >= 0)
                ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;

        if (tp->vnic_shift >= 0) {
                if ((adap->params.tp.ingress_config & VNIC_F))
                        ntuple_mask |= (u64)fs->mask.pfvf_vld << tp->vnic_shift;
                else
                        ntuple_mask |= (u64)fs->mask.ovlan_vld <<
                                tp->vnic_shift;
        }

        if (tp->vlan_shift >= 0)
                ntuple_mask |= (u64)fs->mask.ivlan << tp->vlan_shift;

        if (tp->tos_shift >= 0)
                ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;

        if (tp->protocol_shift >= 0)
                ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;

        if (tp->ethertype_shift >= 0)
                ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;

        if (tp->macmatch_shift >= 0)
                ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;

        if (tp->matchtype_shift >= 0)
                ntuple_mask |= (u64)fs->mask.matchtype << tp->matchtype_shift;

        if (tp->frag_shift >= 0)
                ntuple_mask |= (u64)fs->mask.frag << tp->frag_shift;

        if (ntuple_mask != hash_filter_mask)
                return false;

        return true;
}

static u64 hash_filter_ntuple(struct ch_filter_specification *fs,
                              struct net_device *dev)
{
        struct adapter *adap = netdev2adap(dev);
        struct tp_params *tp = &adap->params.tp;
        u64 ntuple = 0;

        /* Initialize each of the fields which we care about which are present
         * in the Compressed Filter Tuple.
         */
        if (tp->vlan_shift >= 0 && fs->mask.ivlan)
                ntuple |= (FT_VLAN_VLD_F | fs->val.ivlan) << tp->vlan_shift;

        if (tp->port_shift >= 0 && fs->mask.iport)
                ntuple |= (u64)fs->val.iport << tp->port_shift;

        if (tp->protocol_shift >= 0) {
                if (!fs->val.proto)
                        ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
                else
                        ntuple |= (u64)fs->val.proto << tp->protocol_shift;
        }

        if (tp->tos_shift >= 0 && fs->mask.tos)
                ntuple |= (u64)(fs->val.tos) << tp->tos_shift;

        if (tp->vnic_shift >= 0) {
                if ((adap->params.tp.ingress_config & USE_ENC_IDX_F) &&
                    fs->mask.encap_vld)
                        ntuple |= (u64)((fs->val.encap_vld << 16) |
                                        (fs->val.ovlan)) << tp->vnic_shift;
                else if ((adap->params.tp.ingress_config & VNIC_F) &&
                         fs->mask.pfvf_vld)
                        ntuple |= (u64)((fs->val.pfvf_vld << 16) |
                                        (fs->val.pf << 13) |
                                        (fs->val.vf)) << tp->vnic_shift;
                else
                        ntuple |= (u64)((fs->val.ovlan_vld << 16) |
                                        (fs->val.ovlan)) << tp->vnic_shift;
        }

        if (tp->macmatch_shift >= 0 && fs->mask.macidx)
                ntuple |= (u64)(fs->val.macidx) << tp->macmatch_shift;

        if (tp->ethertype_shift >= 0 && fs->mask.ethtype)
                ntuple |= (u64)(fs->val.ethtype) << tp->ethertype_shift;

        if (tp->matchtype_shift >= 0 && fs->mask.matchtype)
                ntuple |= (u64)(fs->val.matchtype) << tp->matchtype_shift;

        if (tp->frag_shift >= 0 && fs->mask.frag)
                ntuple |= (u64)(fs->val.frag) << tp->frag_shift;

        if (tp->fcoe_shift >= 0 && fs->mask.fcoe)
                ntuple |= (u64)(fs->val.fcoe) << tp->fcoe_shift;
        return ntuple;
}
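
/* Worked example (assumed TP shifts, not real hardware values): with
 * protocol_shift = 8 and port_shift = 0, a spec matching TCP on ingress
 * port 1 produces
 *
 *	ntuple = ((u64)IPPROTO_TCP << 8) | ((u64)1 << 0);
 *
 * Fields whose shift is negative are not part of the compressed tuple
 * and are skipped entirely.
 */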

static void mk_act_open_req6(struct filter_entry *f, struct sk_buff *skb,
                             unsigned int qid_filterid, struct adapter *adap)
{
        struct cpl_t6_act_open_req6 *t6req = NULL;
        struct cpl_act_open_req6 *req = NULL;

        t6req = (struct cpl_t6_act_open_req6 *)__skb_put(skb, sizeof(*t6req));
        INIT_TP_WR(t6req, 0);
        req = (struct cpl_act_open_req6 *)t6req;
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, qid_filterid));
        req->local_port = cpu_to_be16(f->fs.val.lport);
        req->peer_port = cpu_to_be16(f->fs.val.fport);
        req->local_ip_hi = *(__be64 *)(&f->fs.val.lip);
        req->local_ip_lo = *(((__be64 *)&f->fs.val.lip) + 1);
        req->peer_ip_hi = *(__be64 *)(&f->fs.val.fip);
        req->peer_ip_lo = *(((__be64 *)&f->fs.val.fip) + 1);
        req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE ||
                                        f->fs.newvlan == VLAN_REWRITE) |
                                DELACK_V(f->fs.hitcnts) |
                                L2T_IDX_V(f->l2t ? f->l2t->idx : 0) |
                                SMAC_SEL_V((cxgb4_port_viid(f->dev) &
                                            0x7F) << 1) |
                                TX_CHAN_V(f->fs.eport) |
                                NO_CONG_V(f->fs.rpttid) |
                                ULP_MODE_V(f->fs.nat_mode ?
                                           ULP_MODE_TCPDDP : ULP_MODE_NONE) |
                                TCAM_BYPASS_F | NON_OFFLOAD_F);
        t6req->params = cpu_to_be64(FILTER_TUPLE_V(hash_filter_ntuple(&f->fs,
                                                                      f->dev)));
        t6req->opt2 = htonl(RSS_QUEUE_VALID_F |
                            RSS_QUEUE_V(f->fs.iq) |
                            TX_QUEUE_V(f->fs.nat_mode) |
                            T5_OPT_2_VALID_F |
                            RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) |
                            CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
                                         (f->fs.dirsteer << 1)) |
                            PACE_V((f->fs.maskhash) |
                                   ((f->fs.dirsteerhash) << 1)) |
                            CCTRL_ECN_V(f->fs.action == FILTER_SWITCH));
}

static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
                            unsigned int qid_filterid, struct adapter *adap)
{
        struct cpl_t6_act_open_req *t6req = NULL;
        struct cpl_act_open_req *req = NULL;

        t6req = (struct cpl_t6_act_open_req *)__skb_put(skb, sizeof(*t6req));
        INIT_TP_WR(t6req, 0);
        req = (struct cpl_act_open_req *)t6req;
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_filterid));
        req->local_port = cpu_to_be16(f->fs.val.lport);
        req->peer_port = cpu_to_be16(f->fs.val.fport);
        memcpy(&req->local_ip, f->fs.val.lip, 4);
        memcpy(&req->peer_ip, f->fs.val.fip, 4);
        req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE ||
                                        f->fs.newvlan == VLAN_REWRITE) |
                                DELACK_V(f->fs.hitcnts) |
                                L2T_IDX_V(f->l2t ? f->l2t->idx : 0) |
                                SMAC_SEL_V((cxgb4_port_viid(f->dev) &
                                            0x7F) << 1) |
                                TX_CHAN_V(f->fs.eport) |
                                NO_CONG_V(f->fs.rpttid) |
                                ULP_MODE_V(f->fs.nat_mode ?
                                           ULP_MODE_TCPDDP : ULP_MODE_NONE) |
                                TCAM_BYPASS_F | NON_OFFLOAD_F);

        t6req->params = cpu_to_be64(FILTER_TUPLE_V(hash_filter_ntuple(&f->fs,
                                                                      f->dev)));
        t6req->opt2 = htonl(RSS_QUEUE_VALID_F |
                            RSS_QUEUE_V(f->fs.iq) |
                            TX_QUEUE_V(f->fs.nat_mode) |
                            T5_OPT_2_VALID_F |
                            RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) |
                            CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
                                         (f->fs.dirsteer << 1)) |
                            PACE_V((f->fs.maskhash) |
                                   ((f->fs.dirsteerhash) << 1)) |
                            CCTRL_ECN_V(f->fs.action == FILTER_SWITCH));
}

static int cxgb4_set_hash_filter(struct net_device *dev,
                                 struct ch_filter_specification *fs,
                                 struct filter_ctx *ctx)
{
        struct adapter *adapter = netdev2adap(dev);
        struct port_info *pi = netdev_priv(dev);
        struct tid_info *t = &adapter->tids;
        struct filter_entry *f;
        struct sk_buff *skb;
        int iq, atid, size;
        int ret = 0;
        u32 iconf;

        fill_default_mask(fs);
        ret = validate_filter(dev, fs);
        if (ret)
                return ret;

        iq = get_filter_steerq(dev, fs);
        if (iq < 0)
                return iq;

        f = kzalloc(sizeof(*f), GFP_KERNEL);
        if (!f)
                return -ENOMEM;

        f->fs = *fs;
        f->ctx = ctx;
        f->dev = dev;
        f->fs.iq = iq;

        /* If the new filter requires loopback Destination MAC and/or VLAN
         * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
         * the filter.
         */
        if (f->fs.newdmac || f->fs.newvlan) {
                /* allocate L2T entry for new filter */
                f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
                                                f->fs.eport, f->fs.dmac);
                if (!f->l2t) {
                        ret = -ENOMEM;
                        goto out_err;
                }
        }

        /* If the new filter requires loopback Source MAC rewriting then
         * we need to allocate a SMT entry for the filter.
         */
        if (f->fs.newsmac) {
                f->smt = cxgb4_smt_alloc_switching(f->dev, f->fs.smac);
                if (!f->smt) {
                        if (f->l2t) {
                                cxgb4_l2t_release(f->l2t);
                                f->l2t = NULL;
                        }
                        ret = -ENOMEM;
                        goto free_l2t;
                }
        }

        atid = cxgb4_alloc_atid(t, f);
        if (atid < 0) {
                ret = atid;
                goto free_smt;
        }

        iconf = adapter->params.tp.ingress_config;
        if (iconf & VNIC_F) {
                f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf;
                f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
                f->fs.val.ovlan_vld = fs->val.pfvf_vld;
                f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
        } else if (iconf & USE_ENC_IDX_F) {
                if (f->fs.val.encap_vld) {
                        struct port_info *pi = netdev_priv(f->dev);
                        u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };

                        /* allocate MPS TCAM entry */
                        ret = t4_alloc_encap_mac_filt(adapter, pi->viid,
                                                      match_all_mac,
                                                      match_all_mac,
                                                      f->fs.val.vni,
                                                      f->fs.mask.vni,
                                                      0, 1, 1);
                        if (ret < 0)
                                goto free_atid;

                        f->fs.val.ovlan = ret;
                        f->fs.mask.ovlan = 0xffff;
                        f->fs.val.ovlan_vld = 1;
                        f->fs.mask.ovlan_vld = 1;
                }
        }

        size = sizeof(struct cpl_t6_act_open_req);
        if (f->fs.type) {
                ret = cxgb4_clip_get(f->dev, (const u32 *)&f->fs.val.lip, 1);
                if (ret)
                        goto free_mps;

                skb = alloc_skb(size, GFP_KERNEL);
                if (!skb) {
                        ret = -ENOMEM;
                        goto free_clip;
                }

                mk_act_open_req6(f, skb,
                                 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
                                 adapter);
        } else {
                skb = alloc_skb(size, GFP_KERNEL);
                if (!skb) {
                        ret = -ENOMEM;
                        goto free_mps;
                }

                mk_act_open_req(f, skb,
                                ((adapter->sge.fw_evtq.abs_id << 14) | atid),
                                adapter);
        }

        f->pending = 1;
        set_wr_txq(skb, CPL_PRIORITY_SETUP, f->fs.val.iport & 0x3);
        t4_ofld_send(adapter, skb);
        return 0;

free_clip:
        cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);

free_mps:
        if (f->fs.val.encap_vld && f->fs.val.ovlan_vld)
                t4_free_encap_mac_filt(adapter, pi->viid, f->fs.val.ovlan, 1);

free_atid:
        cxgb4_free_atid(t, atid);

free_smt:
        if (f->smt) {
                cxgb4_smt_release(f->smt);
                f->smt = NULL;
        }

free_l2t:
        if (f->l2t) {
                cxgb4_l2t_release(f->l2t);
                f->l2t = NULL;
        }

out_err:
        kfree(f);
        return ret;
}

/* Check a Chelsio Filter Request for validity, convert it into our internal
 * format and send it to the hardware.  Return 0 on success, an error number
 * otherwise.  We attach any provided filter operation context to the internal
 * filter specification in order to facilitate signaling completion of the
 * operation.
 */
int __cxgb4_set_filter(struct net_device *dev, int filter_id,
                       struct ch_filter_specification *fs,
                       struct filter_ctx *ctx)
{
        struct adapter *adapter = netdev2adap(dev);
        unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
        unsigned int max_fidx, fidx;
        struct filter_entry *f;
        u32 iconf;
        int iq, ret;

        if (fs->hash) {
                if (is_hashfilter(adapter))
                        return cxgb4_set_hash_filter(dev, fs, ctx);
                netdev_err(dev, "%s: Exact-match filters only supported with Hash Filter configuration\n",
                           __func__);
                return -EINVAL;
        }

        max_fidx = adapter->tids.nftids;
        if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
            filter_id >= max_fidx)
                return -E2BIG;

        fill_default_mask(fs);

        ret = validate_filter(dev, fs);
        if (ret)
                return ret;

        iq = get_filter_steerq(dev, fs);
        if (iq < 0)
                return iq;

        /* IPv6 filters occupy four slots and must be aligned on
         * four-slot boundaries.  IPv4 filters only occupy a single
         * slot and have no alignment requirements but writing a new
         * IPv4 filter into the middle of an existing IPv6 filter
         * requires clearing the old IPv6 filter and hence we prevent
         * insertion.
         */
        if (fs->type == 0) { /* IPv4 */
                /* For T6, if our IPv4 filter isn't being written to a
                 * multiple-of-two filter index and there's an IPv6
                 * filter at the multiple-of-2 base slot, then we need
                 * to delete that IPv6 filter ...
                 * For adapters below T6, an IPv6 filter occupies four
                 * entries, so the base slot to check is a multiple of 4.
                 */
1299                if (chip_ver < CHELSIO_T6)
1300                        fidx = filter_id & ~0x3;
1301                else
1302                        fidx = filter_id & ~0x1;
1303
1304                if (fidx != filter_id &&
1305                    adapter->tids.ftid_tab[fidx].fs.type) {
1306                        f = &adapter->tids.ftid_tab[fidx];
1307                        if (f->valid) {
1308                                dev_err(adapter->pdev_dev,
1309                                        "Invalid location. An existing IPv6 filter occupies base slot %u\n",
1310                                        fidx);
1311                                return -EINVAL;
1312                        }
1313                }
1314        } else { /* IPv6 */
1315                if (chip_ver < CHELSIO_T6) {
1316                        /* Ensure that the IPv6 filter is aligned on a
1317                         * multiple of 4 boundary.
1318                         */
1319                        if (filter_id & 0x3) {
1320                                dev_err(adapter->pdev_dev,
1321                                        "Invalid location. IPv6 must be aligned on a 4-slot boundary\n");
1322                                return -EINVAL;
1323                        }
1324
1325                        /* Check all except the base overlapping IPv4 filter
1326                         * slots.
1327                         */
1328                        for (fidx = filter_id + 1; fidx < filter_id + 4;
1329                             fidx++) {
1330                                f = &adapter->tids.ftid_tab[fidx];
1331                                if (f->valid) {
1332                                        dev_err(adapter->pdev_dev,
1333                                                "Invalid location. IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
1334                                                fidx);
1335                                        return -EBUSY;
1336                                }
1337                        }
1338                } else {
1339                        /* On T6, with CLIP enabled, an IPv6 filter
1340                         * occupies 2 entries and must be 2-slot aligned.
1341                         */
1342                        if (filter_id & 0x1)
1343                                return -EINVAL;
1344                        /* Check overlapping IPv4 filter slot */
1345                        fidx = filter_id + 1;
1346                        f = &adapter->tids.ftid_tab[fidx];
1347                        if (f->valid) {
1348                                pr_err("%s: IPv6 filter requires 2 indices. IPv4 filter already present at %d. Please remove IPv4 filter first.\n",
1349                                       __func__, fidx);
1350                                return -EBUSY;
1351                        }
1352                }
1353        }
1354
1355        /* Check to make sure that the provided filter index is not
1356         * already in use by someone else.
1357         */
1358        f = &adapter->tids.ftid_tab[filter_id];
1359        if (f->valid)
1360                return -EBUSY;
1361
1362        fidx = filter_id + adapter->tids.ftid_base;
1363        ret = cxgb4_set_ftid(&adapter->tids, filter_id,
1364                             fs->type ? PF_INET6 : PF_INET,
1365                             chip_ver);
1366        if (ret)
1367                return ret;
1368
1369        /* Check to make sure the filter requested is writable ... */
1370        ret = writable_filter(f);
1371        if (ret) {
1372                /* Clear the bits we have set above */
1373                cxgb4_clear_ftid(&adapter->tids, filter_id,
1374                                 fs->type ? PF_INET6 : PF_INET,
1375                                 chip_ver);
1376                return ret;
1377        }
1378
1379        if (is_t6(adapter->params.chip) && fs->type &&
1380            ipv6_addr_type((const struct in6_addr *)fs->val.lip) !=
1381            IPV6_ADDR_ANY) {
1382                ret = cxgb4_clip_get(dev, (const u32 *)&fs->val.lip, 1);
1383                if (ret) {
1384                        cxgb4_clear_ftid(&adapter->tids, filter_id, PF_INET6,
1385                                         chip_ver);
1386                        return ret;
1387                }
1388        }
1389
1390        /* Convert the filter specification into our internal format.
1391         * We copy the PF/VF specification into the Outer VLAN field
1392         * here so the rest of the code -- including the interface to
1393         * the firmware -- doesn't have to constantly do these checks.
1394         */
1395        f->fs = *fs;
1396        f->fs.iq = iq;
1397        f->dev = dev;
1398
1399        iconf = adapter->params.tp.ingress_config;
1400        if (iconf & VNIC_F) {
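                /* ovlan is 16 bits wide: the PF number lands in bits
                 * 15:13 and the VF number in bits 12:0, so e.g. a
                 * (hypothetical) PF 4 / VF 2 packs as 0x8002.
                 */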
1401                f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf;
1402                f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
1403                f->fs.val.ovlan_vld = fs->val.pfvf_vld;
1404                f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
1405        } else if (iconf & USE_ENC_IDX_F) {
1406                if (f->fs.val.encap_vld) {
1407                        struct port_info *pi = netdev_priv(f->dev);
1408                        u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
1409
1410                        /* allocate MPS TCAM entry */
1411                        ret = t4_alloc_encap_mac_filt(adapter, pi->viid,
1412                                                      match_all_mac,
1413                                                      match_all_mac,
1414                                                      f->fs.val.vni,
1415                                                      f->fs.mask.vni,
1416                                                      0, 1, 1);
1417                        if (ret < 0)
1418                                goto free_clip;
1419
1420                        f->fs.val.ovlan = ret;
1421                        f->fs.mask.ovlan = 0x1ff;
1422                        f->fs.val.ovlan_vld = 1;
1423                        f->fs.mask.ovlan_vld = 1;
1424                }
1425        }
1426
1427        /* Attempt to set the filter.  If we don't succeed, we clear
1428         * it and return the failure.
1429         */
1430        f->ctx = ctx;
1431        f->tid = fidx; /* Save the actual tid */
1432        ret = set_filter_wr(adapter, filter_id);
1433        if (ret) {
1434                cxgb4_clear_ftid(&adapter->tids, filter_id,
1435                                 fs->type ? PF_INET6 : PF_INET,
1436                                 chip_ver);
1437                clear_filter(adapter, f);
1438        }
1439
1440        return ret;
1441
1442free_clip:
1443        if (is_t6(adapter->params.chip) && f->fs.type)
1444                cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
1445        cxgb4_clear_ftid(&adapter->tids, filter_id,
1446                         fs->type ? PF_INET6 : PF_INET, chip_ver);
1447        return ret;
1448}
1449
1450static int cxgb4_del_hash_filter(struct net_device *dev, int filter_id,
1451                                 struct filter_ctx *ctx)
1452{
1453        struct adapter *adapter = netdev2adap(dev);
1454        struct tid_info *t = &adapter->tids;
1455        struct cpl_abort_req *abort_req;
1456        struct cpl_abort_rpl *abort_rpl;
1457        struct cpl_set_tcb_field *req;
1458        struct ulptx_idata *aligner;
1459        struct work_request_hdr *wr;
1460        struct filter_entry *f;
1461        struct sk_buff *skb;
1462        unsigned int wrlen;
1463        int ret;
1464
1465        netdev_dbg(dev, "%s: filter_id = %d ; nftids = %d\n",
1466                   __func__, filter_id, adapter->tids.nftids);
1467
1468        if (filter_id > adapter->tids.ntids)
1469                return -E2BIG;
1470
1471        f = lookup_tid(t, filter_id);
1472        if (!f) {
1473                netdev_err(dev, "%s: no filter entry for filter_id = %d\n",
1474                           __func__, filter_id);
1475                return -EINVAL;
1476        }
1477
1478        ret = writable_filter(f);
1479        if (ret)
1480                return ret;
1481
1482        if (!f->valid)
1483                return -EINVAL;
1484
1485        f->ctx = ctx;
1486        f->pending = 1;
1487        wrlen = roundup(sizeof(*wr) + (sizeof(*req) + sizeof(*aligner))
1488                        + sizeof(*abort_req) + sizeof(*abort_rpl), 16);
1489        skb = alloc_skb(wrlen, GFP_KERNEL);
1490        if (!skb) {
1491                netdev_err(dev, "%s: could not allocate skb\n", __func__);
1492                return -ENOMEM;
1493        }
1494        set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
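        /* Build a single ULP_TX work request carrying three CPLs back
         * to back: a SET_TCB_FIELD that points RSS for this tid at
         * the firmware event queue, then an ABORT_REQ/ABORT_RPL pair
         * that tears the hash-filter connection down.  The ulptx_idata
         * "aligner" pads the first CPL out to the 16-byte boundary
         * the ULP requires.
         */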
1495        req = (struct cpl_set_tcb_field *)__skb_put(skb, wrlen);
1496        INIT_ULPTX_WR(req, wrlen, 0, 0);
1497        wr = (struct work_request_hdr *)req;
1498        wr++;
1499        req = (struct cpl_set_tcb_field *)wr;
1500        mk_set_tcb_ulp(f, req, TCB_RSS_INFO_W, TCB_RSS_INFO_V(TCB_RSS_INFO_M),
1501                       TCB_RSS_INFO_V(adapter->sge.fw_evtq.abs_id), 0, 1);
1502        aligner = (struct ulptx_idata *)(req + 1);
1503        abort_req = (struct cpl_abort_req *)(aligner + 1);
1504        mk_abort_req_ulp(abort_req, f->tid);
1505        abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
1506        mk_abort_rpl_ulp(abort_rpl, f->tid);
1507        t4_ofld_send(adapter, skb);
1508        return 0;
1509}
1510
1511/* Check a delete filter request for validity and send it to the hardware.
1512 * Return 0 on success, an error number otherwise.  We attach any provided
1513 * filter operation context to the internal filter specification in order to
1514 * facilitate signaling completion of the operation.
1515 */
1516int __cxgb4_del_filter(struct net_device *dev, int filter_id,
1517                       struct ch_filter_specification *fs,
1518                       struct filter_ctx *ctx)
1519{
1520        struct adapter *adapter = netdev2adap(dev);
1521        unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
1522        struct filter_entry *f;
1523        unsigned int max_fidx;
1524        int ret;
1525
1526        if (fs && fs->hash) {
1527                if (is_hashfilter(adapter))
1528                        return cxgb4_del_hash_filter(dev, filter_id, ctx);
1529                netdev_err(dev, "%s: Exact-match filters only supported with Hash Filter configuration\n",
1530                           __func__);
1531                return -EINVAL;
1532        }
1533
1534        max_fidx = adapter->tids.nftids;
1535        if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
1536            filter_id >= max_fidx)
1537                return -E2BIG;
1538
1539        f = &adapter->tids.ftid_tab[filter_id];
1540        ret = writable_filter(f);
1541        if (ret)
1542                return ret;
1543
1544        if (f->valid) {
1545                f->ctx = ctx;
1546                cxgb4_clear_ftid(&adapter->tids, filter_id,
1547                                 f->fs.type ? PF_INET6 : PF_INET,
1548                                 chip_ver);
1549                return del_filter_wr(adapter, filter_id);
1550        }
1551
1552        /* If the caller has passed in a Completion Context then we need to
1553         * mark it as a successful completion so they don't stall waiting
1554         * for it.
1555         */
1556        if (ctx) {
1557                ctx->result = 0;
1558                complete(&ctx->completion);
1559        }
1560        return ret;
1561}
1562
1563int cxgb4_set_filter(struct net_device *dev, int filter_id,
1564                     struct ch_filter_specification *fs)
1565{
1566        struct filter_ctx ctx;
1567        int ret;
1568
1569        init_completion(&ctx.completion);
1570
1571        ret = __cxgb4_set_filter(dev, filter_id, fs, &ctx);
1572        if (ret)
1573                goto out;
1574
1575        /* Wait for reply */
1576        ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
1577        if (!ret)
1578                return -ETIMEDOUT;
1579
1580        ret = ctx.result;
1581out:
1582        return ret;
1583}
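
/* Example (hypothetical values, for illustration only): a caller
 * steering TCP traffic destined to local port 80 at ingress queue 0
 * might fill in a specification roughly as follows:
 *
 *	struct ch_filter_specification fs = { 0 };
 *
 *	fs.val.proto = IPPROTO_TCP;
 *	fs.mask.proto = 0xff;
 *	fs.val.lport = 80;
 *	fs.mask.lport = 0xffff;
 *	fs.action = FILTER_PASS;
 *	fs.dirsteer = 1;
 *	fs.iq = 0;
 *	err = cxgb4_set_filter(dev, fid, &fs);
 *
 * cxgb4_set_filter() blocks for up to 10 seconds waiting for the
 * firmware reply and returns the result (or -ETIMEDOUT).
 */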
1584
1585int cxgb4_del_filter(struct net_device *dev, int filter_id,
1586                     struct ch_filter_specification *fs)
1587{
1588        struct filter_ctx ctx;
1589        int ret;
1590
1591        if (netdev2adap(dev)->flags & CXGB4_SHUTTING_DOWN)
1592                return 0;
1593
1594        init_completion(&ctx.completion);
1595
1596        ret = __cxgb4_del_filter(dev, filter_id, fs, &ctx);
1597        if (ret)
1598                goto out;
1599
1600        /* Wait for reply */
1601        ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
1602        if (!ret)
1603                return -ETIMEDOUT;
1604
1605        ret = ctx.result;
1606out:
1607        return ret;
1608}
1609
1610static int configure_filter_tcb(struct adapter *adap, unsigned int tid,
1611                                struct filter_entry *f)
1612{
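        /* When hit counting is requested, zero the TCB timestamp and
         * RTT age words so the hardware can reuse them as hit
         * counters for this filter.
         */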
1613        if (f->fs.hitcnts)
1614                set_tcb_field(adap, f, tid, TCB_TIMESTAMP_W,
1615                              TCB_TIMESTAMP_V(TCB_TIMESTAMP_M) |
1616                              TCB_RTT_TS_RECENT_AGE_V(TCB_RTT_TS_RECENT_AGE_M),
1617                              TCB_TIMESTAMP_V(0ULL) |
1618                              TCB_RTT_TS_RECENT_AGE_V(0ULL),
1619                              1);
1620
1621        if (f->fs.newdmac)
1622                set_tcb_tflag(adap, f, tid, TF_CCTRL_ECE_S, 1,
1623                              1);
1624
1625        if (f->fs.newvlan == VLAN_INSERT ||
1626            f->fs.newvlan == VLAN_REWRITE)
1627                set_tcb_tflag(adap, f, tid, TF_CCTRL_RFR_S, 1,
1628                              1);
1629        if (f->fs.newsmac)
1630                configure_filter_smac(adap, f);
1631
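        /* Each NAT mode selects which of the four rewrite fields
         * set_nat_params() programs, in (dip, sip, dp, sp) order:
         *
         *	NAT_MODE_DIP		dip only
         *	NAT_MODE_DIP_DP		dip + dp
         *	NAT_MODE_DIP_DP_SIP	dip + sip + dp
         *	NAT_MODE_DIP_DP_SP	dip + dp + sp
         *	NAT_MODE_SIP_SP		sip + sp
         *	NAT_MODE_DIP_SIP_SP	dip + sip + sp
         *	NAT_MODE_ALL		all four
         */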
1632        if (f->fs.nat_mode) {
1633                switch (f->fs.nat_mode) {
1634                case NAT_MODE_DIP:
1635                        set_nat_params(adap, f, tid, true, false, false, false);
1636                        break;
1637
1638                case NAT_MODE_DIP_DP:
1639                        set_nat_params(adap, f, tid, true, false, true, false);
1640                        break;
1641
1642                case NAT_MODE_DIP_DP_SIP:
1643                        set_nat_params(adap, f, tid, true, true, true, false);
1644                        break;
1645                case NAT_MODE_DIP_DP_SP:
1646                        set_nat_params(adap, f, tid, true, false, true, true);
1647                        break;
1648
1649                case NAT_MODE_SIP_SP:
1650                        set_nat_params(adap, f, tid, false, true, false, true);
1651                        break;
1652
1653                case NAT_MODE_DIP_SIP_SP:
1654                        set_nat_params(adap, f, tid, true, true, false, true);
1655                        break;
1656
1657                case NAT_MODE_ALL:
1658                        set_nat_params(adap, f, tid, true, true, true, true);
1659                        break;
1660
1661                default:
1662                        pr_err("%s: Invalid NAT mode: %d\n",
1663                               __func__, f->fs.nat_mode);
1664                        return -EINVAL;
1665                }
1666        }
1667        return 0;
1668}
1669
1670void hash_del_filter_rpl(struct adapter *adap,
1671                         const struct cpl_abort_rpl_rss *rpl)
1672{
1673        unsigned int status = rpl->status;
1674        struct tid_info *t = &adap->tids;
1675        unsigned int tid = GET_TID(rpl);
1676        struct filter_ctx *ctx = NULL;
1677        struct filter_entry *f;
1678
1679        dev_dbg(adap->pdev_dev, "%s: status = %u; tid = %u\n",
1680                __func__, status, tid);
1681
1682        f = lookup_tid(t, tid);
1683        if (!f) {
1684                dev_err(adap->pdev_dev, "%s: could not find filter entry\n",
1685                        __func__);
1686                return;
1687        }
1688        ctx = f->ctx;
1689        f->ctx = NULL;
1690        clear_filter(adap, f);
1691        cxgb4_remove_tid(t, 0, tid, 0);
1692        kfree(f);
1693        if (ctx) {
1694                ctx->result = 0;
1695                complete(&ctx->completion);
1696        }
1697}
1698
1699void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
1700{
1701        unsigned int ftid = TID_TID_G(AOPEN_ATID_G(ntohl(rpl->atid_status)));
1702        unsigned int status = AOPEN_STATUS_G(ntohl(rpl->atid_status));
1703        struct tid_info *t = &adap->tids;
1704        unsigned int tid = GET_TID(rpl);
1705        struct filter_ctx *ctx = NULL;
1706        struct filter_entry *f;
1707
1708        dev_dbg(adap->pdev_dev, "%s: tid = %u; atid = %u; status = %u\n",
1709                __func__, tid, ftid, status);
1710
1711        f = lookup_atid(t, ftid);
1712        if (!f) {
1713                dev_err(adap->pdev_dev, "%s: could not find filter entry\n",
1714                        __func__);
1715                return;
1716        }
1717        ctx = f->ctx;
1718        f->ctx = NULL;
1719
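        /* On success the filter moves from its provisional atid to a
         * real hardware tid: record the tid, mark the entry valid,
         * publish it in the tid table, and release the atid.
         */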
1720        switch (status) {
1721        case CPL_ERR_NONE:
1722                f->tid = tid;
1723                f->pending = 0;
1724                f->valid = 1;
1725                cxgb4_insert_tid(t, f, f->tid, 0);
1726                cxgb4_free_atid(t, ftid);
1727                if (ctx) {
1728                        ctx->tid = f->tid;
1729                        ctx->result = 0;
1730                }
1731                if (configure_filter_tcb(adap, tid, f)) {
1732                        clear_filter(adap, f);
1733                        cxgb4_remove_tid(t, 0, tid, 0);
1734                        kfree(f);
1735                        if (ctx) {
1736                                ctx->result = -EINVAL;
1737                                complete(&ctx->completion);
1738                        }
1739                        return;
1740                }
1741                break;
1742
1743        default:
1744                if (status != CPL_ERR_TCAM_FULL)
1745                        dev_err(adap->pdev_dev, "%s: filter creation failed; status = %u\n",
1746                                __func__, status);
1747
1748                if (ctx) {
1749                        if (status == CPL_ERR_TCAM_FULL)
1750                                ctx->result = -ENOSPC;
1751                        else
1752                                ctx->result = -EINVAL;
1753                }
1754                clear_filter(adap, f);
1755                cxgb4_free_atid(t, ftid);
1756                kfree(f);
1757        }
1758        if (ctx)
1759                complete(&ctx->completion);
1760}
1761
1762/* Handle a filter write/deletion reply. */
1763void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
1764{
1765        unsigned int tid = GET_TID(rpl);
1766        struct filter_entry *f = NULL;
1767        unsigned int max_fidx;
1768        int idx;
1769
1770        max_fidx = adap->tids.nftids + adap->tids.nsftids;
1771        /* Get the corresponding filter entry for this tid */
1772        if (adap->tids.ftid_tab) {
1773                /* Check this in normal filter region */
1774                idx = tid - adap->tids.ftid_base;
1775                if (idx >= max_fidx)
1776                        return;
1777                f = &adap->tids.ftid_tab[idx];
1778                if (f->tid != tid)
1779                        return;
1780        }
1781
1782        /* We found the filter entry for this tid */
1783        if (f) {
1784                unsigned int ret = TCB_COOKIE_G(rpl->cookie);
1785                struct filter_ctx *ctx;
1786
1787                /* Pull off any filter operation context attached to the
1788                 * filter.
1789                 */
1790                ctx = f->ctx;
1791                f->ctx = NULL;
1792
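                /* The firmware reports the outcome of the filter work
                 * request in the TCB cookie: FLT_DELETED and FLT_ADDED
                 * confirm a successful delete/add; anything else means
                 * the request failed.
                 */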
1793                if (ret == FW_FILTER_WR_FLT_DELETED) {
1794                        /* Clear the filter when we get confirmation from the
1795                         * hardware that the filter has been deleted.
1796                         */
1797                        clear_filter(adap, f);
1798                        if (ctx)
1799                                ctx->result = 0;
1800                } else if (ret == FW_FILTER_WR_FLT_ADDED) {
1801                        int err = 0;
1802
1803                        if (f->fs.newsmac)
1804                                err = configure_filter_smac(adap, f);
1805
1806                        if (!err) {
1807                                f->pending = 0;  /* async setup completed */
1808                                f->valid = 1;
1809                                if (ctx) {
1810                                        ctx->result = 0;
1811                                        ctx->tid = idx;
1812                                }
1813                        } else {
1814                                clear_filter(adap, f);
1815                                if (ctx)
1816                                        ctx->result = err;
1817                        }
1818                } else {
1819                        /* Something went wrong.  Issue a warning about the
1820                         * problem and clear everything out.
1821                         */
1822                        dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
1823                                idx, ret);
1824                        clear_filter(adap, f);
1825                        if (ctx)
1826                                ctx->result = -EINVAL;
1827                }
1828                if (ctx)
1829                        complete(&ctx->completion);
1830        }
1831}
1832
1833void init_hash_filter(struct adapter *adap)
1834{
1835        u32 reg;
1836
1837        /* On T6, verify the required register configuration, and
1838         * complain and bail out if it is invalid.
1839         */
1840        if (is_t6(adap->params.chip)) {
1841                if (is_offload(adap)) {
1842                        if (!(t4_read_reg(adap, TP_GLOBAL_CONFIG_A)
1843                           & ACTIVEFILTERCOUNTS_F)) {
1844                                dev_err(adap->pdev_dev, "Invalid hash filter + ofld config\n");
1845                                return;
1846                        }
1847                } else {
1848                        reg = t4_read_reg(adap, LE_DB_RSP_CODE_0_A);
1849                        if (TCAM_ACTV_HIT_G(reg) != 4) {
1850                                dev_err(adap->pdev_dev, "Invalid hash filter config\n");
1851                                return;
1852                        }
1853
1854                        reg = t4_read_reg(adap, LE_DB_RSP_CODE_1_A);
1855                        if (HASH_ACTV_HIT_G(reg) != 4) {
1856                                dev_err(adap->pdev_dev, "Invalid hash filter config\n");
1857                                return;
1858                        }
1859                }
1860
1861        } else {
1862                dev_err(adap->pdev_dev, "Hash filters are supported only on T6\n");
1863                return;
1864        }
1865
1866        adap->params.hash_filter = 1;
1867}
1868