linux/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "cxgb4.h"
#include "t4_regs.h"
#include "l2t.h"
#include "t4fw_api.h"
#include "cxgb4_filter.h"

static inline bool is_field_set(u32 val, u32 mask)
{
        return val || mask;
}

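/* A field is "unsupported" when the filter asks to match on it (non-zero
 * value or mask) but the corresponding bit isn't present in the card's
 * compressed filter tuple configuration (see the TP_VLAN_PRI_MAP based
 * fconf checks in validate_filter() below).
 */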
static inline bool unsupported(u32 conf, u32 conf_mask, u32 val, u32 mask)
{
        return !(conf & conf_mask) && is_field_set(val, mask);
}

/* Validate filter spec against configuration done on the card. */
static int validate_filter(struct net_device *dev,
                           struct ch_filter_specification *fs)
{
        struct adapter *adapter = netdev2adap(dev);
        u32 fconf, iconf;

        /* Check for unconfigured fields being used. */
        fconf = adapter->params.tp.vlan_pri_map;
        iconf = adapter->params.tp.ingress_config;

        if (unsupported(fconf, FCOE_F, fs->val.fcoe, fs->mask.fcoe) ||
            unsupported(fconf, PORT_F, fs->val.iport, fs->mask.iport) ||
            unsupported(fconf, TOS_F, fs->val.tos, fs->mask.tos) ||
            unsupported(fconf, ETHERTYPE_F, fs->val.ethtype,
                        fs->mask.ethtype) ||
            unsupported(fconf, MACMATCH_F, fs->val.macidx, fs->mask.macidx) ||
            unsupported(fconf, MPSHITTYPE_F, fs->val.matchtype,
                        fs->mask.matchtype) ||
            unsupported(fconf, FRAGMENTATION_F, fs->val.frag, fs->mask.frag) ||
            unsupported(fconf, PROTOCOL_F, fs->val.proto, fs->mask.proto) ||
            unsupported(fconf, VNIC_ID_F, fs->val.pfvf_vld,
                        fs->mask.pfvf_vld) ||
            unsupported(fconf, VNIC_ID_F, fs->val.ovlan_vld,
                        fs->mask.ovlan_vld) ||
            unsupported(fconf, VLAN_F, fs->val.ivlan_vld, fs->mask.ivlan_vld))
                return -EOPNOTSUPP;

        /* T4 inconveniently uses the same FT_VNIC_ID_W bits for both the Outer
         * VLAN Tag and PF/VF/VFvld fields based on VNIC_F being set
         * in TP_INGRESS_CONFIG.  Hence the somewhat crazy checks
         * below.  Additionally, since the T4 firmware interface also
         * carries that overlap, we need to translate any PF/VF
         * specification into that internal format below.
         */
        if (is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) &&
            is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld))
                return -EOPNOTSUPP;
        if (unsupported(iconf, VNIC_F, fs->val.pfvf_vld, fs->mask.pfvf_vld) ||
            (is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) &&
             (iconf & VNIC_F)))
                return -EOPNOTSUPP;
        if (fs->val.pf > 0x7 || fs->val.vf > 0x7f)
                return -ERANGE;
        fs->mask.pf &= 0x7;
        fs->mask.vf &= 0x7f;

        /* If the user is requesting that the filter action loop
         * matching packets back out one of our ports, make sure that
         * the egress port is in range.
         */
        if (fs->action == FILTER_SWITCH &&
            fs->eport >= adapter->params.nports)
                return -ERANGE;

        /* Don't allow various trivially obvious bogus out-of-range values... */
        if (fs->val.iport >= adapter->params.nports)
                return -ERANGE;

        /* T4 doesn't support removing VLAN Tags for loop back filters. */
        if (is_t4(adapter->params.chip) &&
            fs->action == FILTER_SWITCH &&
            (fs->newvlan == VLAN_REMOVE ||
             fs->newvlan == VLAN_REWRITE))
                return -EOPNOTSUPP;

        return 0;
}

static int get_filter_steerq(struct net_device *dev,
                             struct ch_filter_specification *fs)
{
        struct adapter *adapter = netdev2adap(dev);
        int iq;

        /* If the user has requested steering matching Ingress Packets
         * to a specific Queue Set, we need to make sure it's in range
         * for the port and map that into the Absolute Queue ID of the
         * Queue Set's Response Queue.
         */
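        /* For example: with dirsteer set and fs->iq == 2 on a port whose
         * first Queue Set index is 8, the filter steers to the absolute
         * queue ID of ethrxq[10]'s response queue.
         */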
        if (!fs->dirsteer) {
                if (fs->iq)
                        return -EINVAL;
                iq = 0;
        } else {
                struct port_info *pi = netdev_priv(dev);

                /* If the iq id is greater than the number of qsets,
                 * then assume it is an absolute qid.
                 */
                if (fs->iq < pi->nqsets)
                        iq = adapter->sge.ethrxq[pi->first_qset +
                                                 fs->iq].rspq.abs_id;
                else
                        iq = fs->iq;
        }

        return iq;
}

static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family)
{
        spin_lock_bh(&t->ftid_lock);

        if (test_bit(fidx, t->ftid_bmap)) {
                spin_unlock_bh(&t->ftid_lock);
                return -EBUSY;
        }

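        /* An IPv4 filter occupies a single slot; an IPv6 filter claims an
         * order-2 region of the bitmap, i.e. 2^2 = 4 consecutive slots.
         */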
        if (family == PF_INET)
                __set_bit(fidx, t->ftid_bmap);
        else
                bitmap_allocate_region(t->ftid_bmap, fidx, 2);

        spin_unlock_bh(&t->ftid_lock);
        return 0;
}

static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family)
{
        spin_lock_bh(&t->ftid_lock);
        if (family == PF_INET)
                __clear_bit(fidx, t->ftid_bmap);
        else
                bitmap_release_region(t->ftid_bmap, fidx, 2);
        spin_unlock_bh(&t->ftid_lock);
}

/* Delete the filter at a specified index. */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct fw_filter_wr *fwr;
        struct sk_buff *skb;
        unsigned int len;

        len = sizeof(*fwr);

        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        fwr = (struct fw_filter_wr *)__skb_put(skb, len);
        t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);

        /* Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        t4_mgmt_tx(adapter, skb);
        return 0;
}

/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
int set_filter_wr(struct adapter *adapter, int fidx)
{
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct fw_filter_wr *fwr;
        struct sk_buff *skb;

        skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        /* If the new filter requires loopback Destination MAC and/or VLAN
         * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
         * the filter.
         */
        if (f->fs.newdmac || f->fs.newvlan) {
                /* allocate L2T entry for new filter */
                f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
                                                f->fs.eport, f->fs.dmac);
                if (!f->l2t) {
                        kfree_skb(skb);
                        return -ENOMEM;
                }
        }

        fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
        memset(fwr, 0, sizeof(*fwr));

        /* It would be nice to put most of the following in t4_hw.c but most
         * of the work is translating the cxgbtool ch_filter_specification
         * into the Work Request and the definition of that structure is
         * currently in cxgbtool.h which isn't appropriate to pull into the
         * common code.  We may eventually try to come up with a more neutral
         * filter specification structure but for now it's easiest to simply
         * put this fairly direct code in line ...
         */
        fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
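        /* Work-request lengths are expressed to the firmware in 16-byte
         * units, hence the sizeof(*fwr) / 16 below.
         */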
        fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr) / 16));
        fwr->tid_to_iq =
                htonl(FW_FILTER_WR_TID_V(f->tid) |
                      FW_FILTER_WR_RQTYPE_V(f->fs.type) |
                      FW_FILTER_WR_NOREPLY_V(0) |
                      FW_FILTER_WR_IQ_V(f->fs.iq));
        fwr->del_filter_to_l2tix =
                htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
                      FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
                      FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
                      FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
                      FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
                      FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
                      FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
                      FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
                      FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
                                             f->fs.newvlan == VLAN_REWRITE) |
                      FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
                                            f->fs.newvlan == VLAN_REWRITE) |
                      FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
                      FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
                      FW_FILTER_WR_PRIO_V(f->fs.prio) |
                      FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
        fwr->ethtype = htons(f->fs.val.ethtype);
        fwr->ethtypem = htons(f->fs.mask.ethtype);
        fwr->frag_to_ovlan_vldm =
                (FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
                 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
                 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
                 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
                 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
                 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
        fwr->smac_sel = 0;
        fwr->rx_chan_rx_rpl_iq =
                htons(FW_FILTER_WR_RX_CHAN_V(0) |
                      FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
        fwr->maci_to_matchtypem =
                htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
                      FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
                      FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
                      FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
                      FW_FILTER_WR_PORT_V(f->fs.val.iport) |
                      FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
                      FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
                      FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
        fwr->ptcl = f->fs.val.proto;
        fwr->ptclm = f->fs.mask.proto;
        fwr->ttyp = f->fs.val.tos;
        fwr->ttypm = f->fs.mask.tos;
        fwr->ivlan = htons(f->fs.val.ivlan);
        fwr->ivlanm = htons(f->fs.mask.ivlan);
        fwr->ovlan = htons(f->fs.val.ovlan);
        fwr->ovlanm = htons(f->fs.mask.ovlan);
        memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
        memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
        memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
        memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
        fwr->lp = htons(f->fs.val.lport);
        fwr->lpm = htons(f->fs.mask.lport);
        fwr->fp = htons(f->fs.val.fport);
        fwr->fpm = htons(f->fs.mask.fport);
        if (f->fs.newsmac)
                memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

        /* Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
        t4_ofld_send(adapter, skb);
        return 0;
}

/* Return an error number if the indicated filter isn't writable ... */
int writable_filter(struct filter_entry *f)
{
        if (f->locked)
                return -EPERM;
        if (f->pending)
                return -EBUSY;

        return 0;
}

/* Delete the filter at the specified index (if valid).  This checks for all
 * the common problems with doing this, like the filter being locked, currently
 * pending in another operation, etc.
 */
int delete_filter(struct adapter *adapter, unsigned int fidx)
{
        struct filter_entry *f;
        int ret;

        if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
                return -EINVAL;

        f = &adapter->tids.ftid_tab[fidx];
        ret = writable_filter(f);
        if (ret)
                return ret;
        if (f->valid)
                return del_filter_wr(adapter, fidx);

        return 0;
}

/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
void clear_filter(struct adapter *adap, struct filter_entry *f)
{
        /* If the new or old filter has loopback rewriting rules then we'll
         * need to free any existing Layer Two Table (L2T) entries of the old
         * filter rule.  The firmware will handle freeing up any Source MAC
         * Table (SMT) entries used for rewriting Source MAC Addresses in
         * loopback rules.
         */
        if (f->l2t)
                cxgb4_l2t_release(f->l2t);

        /* The zeroing of the filter rule below clears the filter valid,
         * pending, locked flags, l2t pointer, etc. so it's all we need for
         * this operation.
         */
        memset(f, 0, sizeof(*f));
}

void clear_all_filters(struct adapter *adapter)
{
        unsigned int i;

        if (adapter->tids.ftid_tab) {
                struct filter_entry *f = &adapter->tids.ftid_tab[0];
                unsigned int max_ftid = adapter->tids.nftids +
                                        adapter->tids.nsftids;

                for (i = 0; i < max_ftid; i++, f++)
                        if (f->valid || f->pending)
                                clear_filter(adapter, f);
        }
}

/* Fill up default masks for set match fields. */
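/* For example, a caller that sets val.lport = 80 but leaves mask.lport at 0
 * almost certainly wants an exact match, so the mask is widened to all-ones
 * (~0) to request an exact match on that field.
 */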
static void fill_default_mask(struct ch_filter_specification *fs)
{
        unsigned int lip = 0, lip_mask = 0;
        unsigned int fip = 0, fip_mask = 0;
        unsigned int i;

        if (fs->val.iport && !fs->mask.iport)
                fs->mask.iport |= ~0;
        if (fs->val.fcoe && !fs->mask.fcoe)
                fs->mask.fcoe |= ~0;
        if (fs->val.matchtype && !fs->mask.matchtype)
                fs->mask.matchtype |= ~0;
        if (fs->val.macidx && !fs->mask.macidx)
                fs->mask.macidx |= ~0;
        if (fs->val.ethtype && !fs->mask.ethtype)
                fs->mask.ethtype |= ~0;
        if (fs->val.ivlan && !fs->mask.ivlan)
                fs->mask.ivlan |= ~0;
        if (fs->val.ovlan && !fs->mask.ovlan)
                fs->mask.ovlan |= ~0;
        if (fs->val.frag && !fs->mask.frag)
                fs->mask.frag |= ~0;
        if (fs->val.tos && !fs->mask.tos)
                fs->mask.tos |= ~0;
        if (fs->val.proto && !fs->mask.proto)
                fs->mask.proto |= ~0;

        for (i = 0; i < ARRAY_SIZE(fs->val.lip); i++) {
                lip |= fs->val.lip[i];
                lip_mask |= fs->mask.lip[i];
                fip |= fs->val.fip[i];
                fip_mask |= fs->mask.fip[i];
        }

        if (lip && !lip_mask)
                memset(fs->mask.lip, ~0, sizeof(fs->mask.lip));

        if (fip && !fip_mask)
                memset(fs->mask.fip, ~0, sizeof(fs->mask.fip));

        if (fs->val.lport && !fs->mask.lport)
                fs->mask.lport = ~0;
        if (fs->val.fport && !fs->mask.fport)
                fs->mask.fport = ~0;
}

/* Check a Chelsio Filter Request for validity, convert it into our internal
 * format and send it to the hardware.  Return 0 on success, an error number
 * otherwise.  We attach any provided filter operation context to the internal
 * filter specification in order to facilitate signaling completion of the
 * operation.
 */
int __cxgb4_set_filter(struct net_device *dev, int filter_id,
                       struct ch_filter_specification *fs,
                       struct filter_ctx *ctx)
{
        struct adapter *adapter = netdev2adap(dev);
        unsigned int max_fidx, fidx;
        struct filter_entry *f;
        u32 iconf;
        int iq, ret;

        max_fidx = adapter->tids.nftids;
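        /* Reject out-of-range indices; the one exception allowed through is
         * the very last index of the combined normal + server filter region.
         */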
        if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
            filter_id >= max_fidx)
                return -E2BIG;

        fill_default_mask(fs);

        ret = validate_filter(dev, fs);
        if (ret)
                return ret;

        iq = get_filter_steerq(dev, fs);
        if (iq < 0)
                return iq;

        /* IPv6 filters occupy four slots and must be aligned on
         * four-slot boundaries.  IPv4 filters only occupy a single
         * slot and have no alignment requirements but writing a new
         * IPv4 filter into the middle of an existing IPv6 filter
         * requires clearing the old IPv6 filter and hence we prevent
         * insertion.
         */
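        /* For example: an IPv6 filter at index 8 occupies slots 8..11, so an
         * IPv4 filter may not be written at index 9, 10 or 11 while that
         * IPv6 filter is valid.
         */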
        if (fs->type == 0) { /* IPv4 */
                /* If our IPv4 filter isn't being written to a
                 * multiple of four filter index and there's an IPv6
                 * filter at the multiple of 4 base slot, then we
                 * prevent insertion.
                 */
                fidx = filter_id & ~0x3;
                if (fidx != filter_id &&
                    adapter->tids.ftid_tab[fidx].fs.type) {
                        f = &adapter->tids.ftid_tab[fidx];
                        if (f->valid) {
                                dev_err(adapter->pdev_dev,
                                        "Invalid location. IPv6 requires 4 slots and is occupying slots %u to %u\n",
                                        fidx, fidx + 3);
                                return -EINVAL;
                        }
                }
        } else { /* IPv6 */
                /* Ensure that the IPv6 filter is aligned on a
                 * multiple of 4 boundary.
                 */
                if (filter_id & 0x3) {
                        dev_err(adapter->pdev_dev,
                                "Invalid location. IPv6 must be aligned on a 4-slot boundary\n");
                        return -EINVAL;
                }

                /* Check all except the base overlapping IPv4 filter slots. */
                for (fidx = filter_id + 1; fidx < filter_id + 4; fidx++) {
                        f = &adapter->tids.ftid_tab[fidx];
                        if (f->valid) {
                                dev_err(adapter->pdev_dev,
                                        "Invalid location.  IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
                                        fidx);
                                return -EINVAL;
                        }
                }
        }

        /* Check to make sure that provided filter index is not
         * already in use by someone else
         */
        f = &adapter->tids.ftid_tab[filter_id];
        if (f->valid)
                return -EBUSY;

        fidx = filter_id + adapter->tids.ftid_base;
        ret = cxgb4_set_ftid(&adapter->tids, filter_id,
                             fs->type ? PF_INET6 : PF_INET);
        if (ret)
                return ret;

        /* Check to make sure the filter requested is writable ... */
        ret = writable_filter(f);
        if (ret) {
                /* Clear the bits we have set above */
                cxgb4_clear_ftid(&adapter->tids, filter_id,
                                 fs->type ? PF_INET6 : PF_INET);
                return ret;
        }

        /* Clear out any old resources being used by the filter before
         * we start constructing the new filter.
         */
        if (f->valid)
                clear_filter(adapter, f);

        /* Convert the filter specification into our internal format.
         * We copy the PF/VF specification into the Outer VLAN field
         * here so the rest of the code -- including the interface to
         * the firmware -- doesn't have to constantly do these checks.
         */
        f->fs = *fs;
        f->fs.iq = iq;
        f->dev = dev;

        iconf = adapter->params.tp.ingress_config;
        if (iconf & VNIC_F) {
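                /* Pack the 3-bit PF (at bit 13) and 7-bit VF into the 16-bit
                 * outer-VLAN value, mirroring the shared VNIC_ID encoding the
                 * hardware uses when VNIC_F is set; e.g. PF 2, VF 5 becomes
                 * ovlan 0x4005.
                 */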
                f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf;
                f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
                f->fs.val.ovlan_vld = fs->val.pfvf_vld;
                f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
        }

        /* Attempt to set the filter.  If we don't succeed, we clear
         * it and return the failure.
         */
        f->ctx = ctx;
        f->tid = fidx; /* Save the actual tid */
        ret = set_filter_wr(adapter, filter_id);
        if (ret) {
                cxgb4_clear_ftid(&adapter->tids, filter_id,
                                 fs->type ? PF_INET6 : PF_INET);
                clear_filter(adapter, f);
        }

        return ret;
}

/* Check a delete filter request for validity and send it to the hardware.
 * Return 0 on success, an error number otherwise.  We attach any provided
 * filter operation context to the internal filter specification in order to
 * facilitate signaling completion of the operation.
 */
int __cxgb4_del_filter(struct net_device *dev, int filter_id,
                       struct filter_ctx *ctx)
{
        struct adapter *adapter = netdev2adap(dev);
        struct filter_entry *f;
        unsigned int max_fidx;
        int ret;

        max_fidx = adapter->tids.nftids;
        if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
            filter_id >= max_fidx)
                return -E2BIG;

        f = &adapter->tids.ftid_tab[filter_id];
        ret = writable_filter(f);
        if (ret)
                return ret;

        if (f->valid) {
                f->ctx = ctx;
                cxgb4_clear_ftid(&adapter->tids, filter_id,
                                 f->fs.type ? PF_INET6 : PF_INET);
                return del_filter_wr(adapter, filter_id);
        }

        /* If the caller has passed in a Completion Context then we need to
         * mark it as a successful completion so they don't stall waiting
         * for it.
         */
        if (ctx) {
                ctx->result = 0;
                complete(&ctx->completion);
        }
        return ret;
}

int cxgb4_set_filter(struct net_device *dev, int filter_id,
                     struct ch_filter_specification *fs)
{
        struct filter_ctx ctx;
        int ret;

        init_completion(&ctx.completion);

        ret = __cxgb4_set_filter(dev, filter_id, fs, &ctx);
        if (ret)
                goto out;

        /* Wait for reply */
        ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
        if (!ret)
                return -ETIMEDOUT;

        ret = ctx.result;
out:
        return ret;
}

int cxgb4_del_filter(struct net_device *dev, int filter_id)
{
        struct filter_ctx ctx;
        int ret;

        init_completion(&ctx.completion);

        ret = __cxgb4_del_filter(dev, filter_id, &ctx);
        if (ret)
                goto out;

        /* Wait for reply */
        ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
        if (!ret)
                return -ETIMEDOUT;

        ret = ctx.result;
out:
        return ret;
}
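
/* Usage sketch (illustrative only, not part of the original driver): how a
 * caller might install and later remove a filter via the two synchronous
 * wrappers above.  Assumes IPPROTO_TCP is visible here and that filter
 * index 0 is free; error handling is reduced to pass/fail.
 */
static int __maybe_unused example_block_tcp_port(struct net_device *dev,
                                                 u16 lport, bool install)
{
        struct ch_filter_specification fs;

        memset(&fs, 0, sizeof(fs));
        fs.type = 0;                    /* IPv4: occupies a single slot */
        fs.val.proto = IPPROTO_TCP;     /* match TCP ... */
        fs.mask.proto = ~0;             /* ... exactly */
        fs.val.lport = lport;           /* local (destination) port */
        fs.mask.lport = ~0;             /* fill_default_mask() would do this too */
        fs.action = FILTER_DROP;        /* drop matching ingress packets */

        if (install)
                return cxgb4_set_filter(dev, 0, &fs);

        /* Deletion only needs the index the filter was installed at. */
        return cxgb4_del_filter(dev, 0);
}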

/* Handle a filter write/deletion reply. */
void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
        unsigned int tid = GET_TID(rpl);
        struct filter_entry *f = NULL;
        unsigned int max_fidx;
        int idx;

        max_fidx = adap->tids.nftids + adap->tids.nsftids;
        /* Get the corresponding filter entry for this tid */
        if (adap->tids.ftid_tab) {
                /* Check this in normal filter region */
                idx = tid - adap->tids.ftid_base;
                if (idx >= max_fidx)
                        return;
                f = &adap->tids.ftid_tab[idx];
                if (f->tid != tid)
                        return;
        }

        /* We found the filter entry for this tid */
        if (f) {
                unsigned int ret = TCB_COOKIE_G(rpl->cookie);
                struct filter_ctx *ctx;

                /* Pull off any filter operation context attached to the
                 * filter.
                 */
                ctx = f->ctx;
                f->ctx = NULL;

                if (ret == FW_FILTER_WR_FLT_DELETED) {
                        /* Clear the filter when we get confirmation from the
                         * hardware that the filter has been deleted.
                         */
                        clear_filter(adap, f);
                        if (ctx)
                                ctx->result = 0;
                } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
                        dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
                                idx);
                        clear_filter(adap, f);
                        if (ctx)
                                ctx->result = -ENOMEM;
                } else if (ret == FW_FILTER_WR_FLT_ADDED) {
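                        /* The firmware reports the Source MAC Table index
                         * assigned to the filter in bits 31:24 of the old
                         * TCB value returned in the reply.
                         */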
                        f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
                        f->pending = 0;  /* asynchronous setup completed */
                        f->valid = 1;
                        if (ctx) {
                                ctx->result = 0;
                                ctx->tid = idx;
                        }
                } else {
                        /* Something went wrong.  Issue a warning about the
                         * problem and clear everything out.
                         */
                        dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
                                idx, ret);
                        clear_filter(adap, f);
                        if (ctx)
                                ctx->result = -EINVAL;
                }
                if (ctx)
                        complete(&ctx->completion);
        }
}