linux/drivers/net/ethernet/chelsio/cxgb4/l2t.c
/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <net/neighbour.h>
#include "cxgb4.h"
#include "l2t.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4_regs.h"
#include "t4_values.h"

/* identifies sync vs async L2T_WRITE_REQs */
#define SYNC_WR_S    12
#define SYNC_WR_V(x) ((x) << SYNC_WR_S)
#define SYNC_WR_F    SYNC_WR_V(1)

struct l2t_data {
        unsigned int l2t_start;     /* start index of our piece of the L2T */
        unsigned int l2t_size;      /* number of entries in l2tab */
        rwlock_t lock;
        atomic_t nfree;             /* number of free entries */
        struct l2t_entry *rover;    /* starting point for next allocation */
        struct l2t_entry l2tab[];   /* MUST BE LAST */
};

static inline unsigned int vlan_prio(const struct l2t_entry *e)
{
        return e->vlan >> VLAN_PRIO_SHIFT;
}

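/*
 * Take a reference on an L2T entry.  A 0 -> 1 refcount transition means a
 * previously free entry is now in use, so decrement the free-entry count.
 */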
static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
{
        if (atomic_add_return(1, &e->refcnt) == 1)  /* 0 -> 1 transition */
                atomic_dec(&d->nfree);
}

/*
 * To avoid having to check address families we do not allow v4 and v6
 * neighbors to be on the same hash chain.  We keep v4 entries in the first
 * half of available hash buckets and v6 in the second.  We need at least two
 * entries in our L2T for this scheme to work.
 */
enum {
        L2T_MIN_HASH_BUCKETS = 2,
};

static inline unsigned int arp_hash(struct l2t_data *d, const u32 *key,
                                    int ifindex)
{
        unsigned int l2t_size_half = d->l2t_size / 2;

        return jhash_2words(*key, ifindex, 0) % l2t_size_half;
}

static inline unsigned int ipv6_hash(struct l2t_data *d, const u32 *key,
                                     int ifindex)
{
        unsigned int l2t_size_half = d->l2t_size / 2;
        u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];

        return (l2t_size_half +
                (jhash_2words(xor, ifindex, 0) % l2t_size_half));
}

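/*
 * Hash an address into its chain, dispatching on address length (4 bytes for
 * IPv4, 16 for IPv6) so the two families land in separate halves of the table.
 */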
static unsigned int addr_hash(struct l2t_data *d, const u32 *addr,
                              int addr_len, int ifindex)
{
        return addr_len == 4 ? arp_hash(d, addr, ifindex) :
                               ipv6_hash(d, addr, ifindex);
}

/*
 * Checks if an L2T entry is for the given IP/IPv6 address.  It does not check
 * whether the L2T entry and the address are of the same address family.
 * Callers ensure an address is only checked against L2T entries of the same
 * family, something made trivial by the separation of IP and IPv6 hash chains
 * mentioned above.  Returns 0 if there's a match.
 */
static int addreq(const struct l2t_entry *e, const u32 *addr)
{
        if (e->v6)
                return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) |
                       (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]);
        return e->addr[0] ^ addr[0];
}

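/*
 * Replace the neighbour backing an L2T entry: take a reference on the new
 * neighbour and release the old one, if any.
 */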
static void neigh_replace(struct l2t_entry *e, struct neighbour *n)
{
        neigh_hold(n);
        if (e->neigh)
                neigh_release(e->neigh);
        e->neigh = n;
}

/*
 * Write an L2T entry.  Must be called with the entry locked.
 * The write may be synchronous or asynchronous.
 */
static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
{
        struct l2t_data *d = adap->l2t;
        unsigned int l2t_idx = e->idx + d->l2t_start;
        struct sk_buff *skb;
        struct cpl_l2t_write_req *req;

        skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        req = __skb_put(skb, sizeof(*req));
        INIT_TP_WR(req, 0);

        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
                                        l2t_idx | (sync ? SYNC_WR_F : 0) |
                                        TID_QID_V(adap->sge.fw_evtq.abs_id)));
        req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync));
        req->l2t_idx = htons(l2t_idx);
        req->vlan = htons(e->vlan);
        if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
                memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
        memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));

        t4_mgmt_tx(adap, skb);

        if (sync && e->state != L2T_STATE_SWITCHING)
                e->state = L2T_STATE_SYNC_WRITE;
        return 0;
}

/*
 * Send packets waiting in an L2T entry's ARP queue.  Must be called with the
 * entry locked.
 */
static void send_pending(struct adapter *adap, struct l2t_entry *e)
{
        struct sk_buff *skb;

        while ((skb = __skb_dequeue(&e->arpq)) != NULL)
                t4_ofld_send(adap, skb);
}

/*
 * Process a CPL_L2T_WRITE_RPL.  Wake up the ARP queue if it completes a
 * synchronous L2T_WRITE.  Note that the TID in the reply is really the L2T
 * index it refers to.
 */
void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl)
{
        struct l2t_data *d = adap->l2t;
        unsigned int tid = GET_TID(rpl);
        unsigned int l2t_idx = tid % L2T_SIZE;

        if (unlikely(rpl->status != CPL_ERR_NONE)) {
                dev_err(adap->pdev_dev,
                        "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
                        rpl->status, l2t_idx);
                return;
        }

        if (tid & SYNC_WR_F) {
                struct l2t_entry *e = &d->l2tab[l2t_idx - d->l2t_start];

                spin_lock(&e->lock);
                if (e->state != L2T_STATE_SWITCHING) {
                        send_pending(adap, e);
                        e->state = (e->neigh->nud_state & NUD_STALE) ?
                                        L2T_STATE_STALE : L2T_STATE_VALID;
                }
                spin_unlock(&e->lock);
        }
}

/*
 * Add a packet to an L2T entry's queue of packets awaiting resolution.
 * Must be called with the entry's lock held.
 */
static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
{
        __skb_queue_tail(&e->arpq, skb);
}

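/*
 * Transmit a packet through an L2T entry.  Valid entries go straight out via
 * t4_ofld_send(); stale entries are revalidated first; packets for entries
 * that are still resolving or awaiting a synchronous write are queued on the
 * entry's ARP queue until the entry becomes valid.
 */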
int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
                   struct l2t_entry *e)
{
        struct adapter *adap = netdev2adap(dev);

again:
        switch (e->state) {
        case L2T_STATE_STALE:     /* entry is stale, kick off revalidation */
                neigh_event_send(e->neigh, NULL);
                spin_lock_bh(&e->lock);
                if (e->state == L2T_STATE_STALE)
                        e->state = L2T_STATE_VALID;
                spin_unlock_bh(&e->lock);
                fallthrough;
        case L2T_STATE_VALID:     /* fast-path, send the packet on */
                return t4_ofld_send(adap, skb);
        case L2T_STATE_RESOLVING:
        case L2T_STATE_SYNC_WRITE:
                spin_lock_bh(&e->lock);
                if (e->state != L2T_STATE_SYNC_WRITE &&
                    e->state != L2T_STATE_RESOLVING) {
                        spin_unlock_bh(&e->lock);
                        goto again;
                }
                arpq_enqueue(e, skb);
                spin_unlock_bh(&e->lock);

                if (e->state == L2T_STATE_RESOLVING &&
                    !neigh_event_send(e->neigh, NULL)) {
                        spin_lock_bh(&e->lock);
                        if (e->state == L2T_STATE_RESOLVING &&
                            !skb_queue_empty(&e->arpq))
                                write_l2e(adap, e, 1);
                        spin_unlock_bh(&e->lock);
                }
        }
        return 0;
}
EXPORT_SYMBOL(cxgb4_l2t_send);

/*
 * Allocate a free L2T entry.  Must be called with l2t_data.lock held.
 */
static struct l2t_entry *alloc_l2e(struct l2t_data *d)
{
        struct l2t_entry *end, *e, **p;

        if (!atomic_read(&d->nfree))
                return NULL;

        /* there's definitely a free entry */
        for (e = d->rover, end = &d->l2tab[d->l2t_size]; e != end; ++e)
                if (atomic_read(&e->refcnt) == 0)
                        goto found;

        for (e = d->l2tab; atomic_read(&e->refcnt); ++e)
                ;
found:
        d->rover = e + 1;
        atomic_dec(&d->nfree);

        /*
         * The entry we found may be an inactive entry that is
         * presently in the hash table.  We need to remove it.
         */
        if (e->state < L2T_STATE_SWITCHING)
                for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)
                        if (*p == e) {
                                *p = e->next;
                                e->next = NULL;
                                break;
                        }

        e->state = L2T_STATE_UNUSED;
        return e;
}

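/*
 * Look for an existing switching entry with the same MAC/VLAN/port, or fall
 * back to the first free entry.  Called with l2t_data.lock held.
 */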
static struct l2t_entry *find_or_alloc_l2e(struct l2t_data *d, u16 vlan,
                                           u8 port, u8 *dmac)
{
        struct l2t_entry *end, *e, **p;
        struct l2t_entry *first_free = NULL;

        for (e = &d->l2tab[0], end = &d->l2tab[d->l2t_size]; e != end; ++e) {
                if (atomic_read(&e->refcnt) == 0) {
                        if (!first_free)
                                first_free = e;
                } else {
                        if (e->state == L2T_STATE_SWITCHING) {
                                if (ether_addr_equal(e->dmac, dmac) &&
                                    (e->vlan == vlan) && (e->lport == port))
                                        goto exists;
                        }
                }
        }

        if (first_free) {
                e = first_free;
                goto found;
        }

        return NULL;

found:
        /* The entry we found may be an inactive entry that is
         * presently in the hash table.  We need to remove it.
         */
        if (e->state < L2T_STATE_SWITCHING)
                for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)
                        if (*p == e) {
                                *p = e->next;
                                e->next = NULL;
                                break;
                        }
        e->state = L2T_STATE_UNUSED;

exists:
        return e;
}

/* Called when an L2T entry has no more users.  The entry is left in the hash
 * table since it is likely to be reused, but we also bump nfree to indicate
 * that the entry can be reallocated for a different neighbor.  We also drop
 * the existing neighbor reference in case the neighbor is going away and is
 * waiting on our reference.
 *
 * Because entries can be reallocated to other neighbors once their ref count
 * drops to 0 we need to take the entry's lock to avoid races with a new
 * incarnation.
 */
static void _t4_l2e_free(struct l2t_entry *e)
{
        struct l2t_data *d;

        if (atomic_read(&e->refcnt) == 0) {  /* hasn't been recycled */
                if (e->neigh) {
                        neigh_release(e->neigh);
                        e->neigh = NULL;
                }
                __skb_queue_purge(&e->arpq);
        }

        d = container_of(e, struct l2t_data, l2tab[e->idx]);
        atomic_inc(&d->nfree);
}

/* Locked version of _t4_l2e_free */
static void t4_l2e_free(struct l2t_entry *e)
{
        struct l2t_data *d;

        spin_lock_bh(&e->lock);
        if (atomic_read(&e->refcnt) == 0) {  /* hasn't been recycled */
                if (e->neigh) {
                        neigh_release(e->neigh);
                        e->neigh = NULL;
                }
                __skb_queue_purge(&e->arpq);
        }
        spin_unlock_bh(&e->lock);

        d = container_of(e, struct l2t_data, l2tab[e->idx]);
        atomic_inc(&d->nfree);
}

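/* Release a reference to an L2T entry, freeing it when the last user is gone. */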
void cxgb4_l2t_release(struct l2t_entry *e)
{
        if (atomic_dec_and_test(&e->refcnt))
                t4_l2e_free(e);
}
EXPORT_SYMBOL(cxgb4_l2t_release);

/*
 * Update an L2T entry that was previously used for the same next hop as neigh.
 * Must be called with softirqs disabled.
 */
static void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
{
        unsigned int nud_state;

        spin_lock(&e->lock);                /* avoid race with t4_l2t_free */
        if (neigh != e->neigh)
                neigh_replace(e, neigh);
        nud_state = neigh->nud_state;
        if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
            !(nud_state & NUD_VALID))
                e->state = L2T_STATE_RESOLVING;
        else if (nud_state & NUD_CONNECTED)
                e->state = L2T_STATE_VALID;
        else
                e->state = L2T_STATE_STALE;
        spin_unlock(&e->lock);
}

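/*
 * Look up the L2T entry for a neighbour on the given port, allocating and
 * initialising a new entry (in RESOLVING state) if none exists.  Returns the
 * entry with its reference count incremented, or NULL if no entry is free.
 */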
struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
                                const struct net_device *physdev,
                                unsigned int priority)
{
        u8 lport;
        u16 vlan;
        struct l2t_entry *e;
        unsigned int addr_len = neigh->tbl->key_len;
        u32 *addr = (u32 *)neigh->primary_key;
        int ifidx = neigh->dev->ifindex;
        int hash = addr_hash(d, addr, addr_len, ifidx);

        if (neigh->dev->flags & IFF_LOOPBACK)
                lport = netdev2pinfo(physdev)->tx_chan + 4;
        else
                lport = netdev2pinfo(physdev)->lport;

        if (is_vlan_dev(neigh->dev)) {
                vlan = vlan_dev_vlan_id(neigh->dev);
                vlan |= vlan_dev_get_egress_qos_mask(neigh->dev, priority);
        } else {
                vlan = VLAN_NONE;
        }

        write_lock_bh(&d->lock);
        for (e = d->l2tab[hash].first; e; e = e->next)
                if (!addreq(e, addr) && e->ifindex == ifidx &&
                    e->vlan == vlan && e->lport == lport) {
                        l2t_hold(d, e);
                        if (atomic_read(&e->refcnt) == 1)
                                reuse_entry(e, neigh);
                        goto done;
                }

        /* Need to allocate a new entry */
        e = alloc_l2e(d);
        if (e) {
                spin_lock(&e->lock);          /* avoid race with t4_l2t_free */
                e->state = L2T_STATE_RESOLVING;
                if (neigh->dev->flags & IFF_LOOPBACK)
                        memcpy(e->dmac, physdev->dev_addr, sizeof(e->dmac));
                memcpy(e->addr, addr, addr_len);
                e->ifindex = ifidx;
                e->hash = hash;
                e->lport = lport;
                e->v6 = addr_len == 16;
                atomic_set(&e->refcnt, 1);
                neigh_replace(e, neigh);
                e->vlan = vlan;
                e->next = d->l2tab[hash].first;
                d->l2tab[hash].first = e;
                spin_unlock(&e->lock);
        }
done:
        write_unlock_bh(&d->lock);
        return e;
}
EXPORT_SYMBOL(cxgb4_l2t_get);

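/*
 * Build the Compressed Filter Tuple (ntuple) for a connection using the given
 * L2T entry, based on which fields the TP configuration says are present.
 */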
u64 cxgb4_select_ntuple(struct net_device *dev,
                        const struct l2t_entry *l2t)
{
        struct adapter *adap = netdev2adap(dev);
        struct tp_params *tp = &adap->params.tp;
        u64 ntuple = 0;

        /* Initialize each of the fields which we care about which are present
         * in the Compressed Filter Tuple.
         */
        if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
                ntuple |= (u64)(FT_VLAN_VLD_F | l2t->vlan) << tp->vlan_shift;

        if (tp->port_shift >= 0)
                ntuple |= (u64)l2t->lport << tp->port_shift;

        if (tp->protocol_shift >= 0)
                ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;

        if (tp->vnic_shift >= 0 && (tp->ingress_config & VNIC_F)) {
                struct port_info *pi = (struct port_info *)netdev_priv(dev);

                ntuple |= (u64)(FT_VNID_ID_VF_V(pi->vin) |
                                FT_VNID_ID_PF_V(adap->pf) |
                                FT_VNID_ID_VLD_V(pi->vivld)) << tp->vnic_shift;
        }

        return ntuple;
}
EXPORT_SYMBOL(cxgb4_select_ntuple);

/*
 * Called when the host's neighbor layer makes a change to some entry that is
 * loaded into the HW L2 table.
 */
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
{
        unsigned int addr_len = neigh->tbl->key_len;
        u32 *addr = (u32 *) neigh->primary_key;
        int hash, ifidx = neigh->dev->ifindex;
        struct sk_buff_head *arpq = NULL;
        struct l2t_data *d = adap->l2t;
        struct l2t_entry *e;

        hash = addr_hash(d, addr, addr_len, ifidx);
        read_lock_bh(&d->lock);
        for (e = d->l2tab[hash].first; e; e = e->next)
                if (!addreq(e, addr) && e->ifindex == ifidx) {
                        spin_lock(&e->lock);
                        if (atomic_read(&e->refcnt))
                                goto found;
                        spin_unlock(&e->lock);
                        break;
                }
        read_unlock_bh(&d->lock);
        return;

 found:
        read_unlock(&d->lock);

        if (neigh != e->neigh)
                neigh_replace(e, neigh);

        if (e->state == L2T_STATE_RESOLVING) {
                if (neigh->nud_state & NUD_FAILED) {
                        arpq = &e->arpq;
                } else if ((neigh->nud_state & (NUD_CONNECTED | NUD_STALE)) &&
                           !skb_queue_empty(&e->arpq)) {
                        write_l2e(adap, e, 1);
                }
        } else {
                e->state = neigh->nud_state & NUD_CONNECTED ?
                        L2T_STATE_VALID : L2T_STATE_STALE;
                if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)))
                        write_l2e(adap, e, 0);
        }

        if (arpq) {
                struct sk_buff *skb;

                /* Address resolution has failed for this L2T entry: handle
                 * the packets waiting on the arpq.  If a packet specifies a
                 * failure handler it is invoked, otherwise the packet is
                 * sent to the device.
                 */
                while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
                        const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);

                        spin_unlock(&e->lock);
                        if (cb->arp_err_handler)
                                cb->arp_err_handler(cb->handle, skb);
                        else
                                t4_ofld_send(adap, skb);
                        spin_lock(&e->lock);
                }
        }
        spin_unlock_bh(&e->lock);
}

/* Allocate an L2T entry for use by a switching rule.  Such entries need to be
 * explicitly freed, and while busy they are not on any hash chain, so normal
 * address resolution updates do not see them.
 */
struct l2t_entry *t4_l2t_alloc_switching(struct adapter *adap, u16 vlan,
                                         u8 port, u8 *eth_addr)
{
        struct l2t_data *d = adap->l2t;
        struct l2t_entry *e;
        int ret;

        write_lock_bh(&d->lock);
        e = find_or_alloc_l2e(d, vlan, port, eth_addr);
        if (e) {
                spin_lock(&e->lock);          /* avoid race with t4_l2t_free */
                if (!atomic_read(&e->refcnt)) {
                        e->state = L2T_STATE_SWITCHING;
                        e->vlan = vlan;
                        e->lport = port;
                        ether_addr_copy(e->dmac, eth_addr);
                        atomic_set(&e->refcnt, 1);
                        ret = write_l2e(adap, e, 0);
                        if (ret < 0) {
                                _t4_l2e_free(e);
                                spin_unlock(&e->lock);
                                write_unlock_bh(&d->lock);
                                return NULL;
                        }
                } else {
                        atomic_inc(&e->refcnt);
                }

                spin_unlock(&e->lock);
        }
        write_unlock_bh(&d->lock);
        return e;
}

/**
 * cxgb4_l2t_alloc_switching - Allocates an L2T entry for switch filters
 * @dev: net_device pointer
 * @vlan: VLAN Id
 * @port: Associated port
 * @dmac: Destination MAC address to add to L2T
 *
 * Allocates an L2T entry for use by the switching rule of a filter.
 * Returns a pointer to the allocated L2T entry.
 */
struct l2t_entry *cxgb4_l2t_alloc_switching(struct net_device *dev, u16 vlan,
                                            u8 port, u8 *dmac)
{
        struct adapter *adap = netdev2adap(dev);

        return t4_l2t_alloc_switching(adap, vlan, port, dmac);
}
EXPORT_SYMBOL(cxgb4_l2t_alloc_switching);

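/*
 * Allocate and initialise the software L2 table covering hardware indices
 * l2t_start..l2t_end inclusive.  Returns NULL if the range is invalid or
 * memory allocation fails.
 */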
struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end)
{
        unsigned int l2t_size;
        int i;
        struct l2t_data *d;

        if (l2t_start >= l2t_end || l2t_end >= L2T_SIZE)
                return NULL;
        l2t_size = l2t_end - l2t_start + 1;
        if (l2t_size < L2T_MIN_HASH_BUCKETS)
                return NULL;

        d = kvzalloc(struct_size(d, l2tab, l2t_size), GFP_KERNEL);
        if (!d)
                return NULL;

        d->l2t_start = l2t_start;
        d->l2t_size = l2t_size;

        d->rover = d->l2tab;
        atomic_set(&d->nfree, l2t_size);
        rwlock_init(&d->lock);

        for (i = 0; i < d->l2t_size; ++i) {
                d->l2tab[i].idx = i;
                d->l2tab[i].state = L2T_STATE_UNUSED;
                spin_lock_init(&d->l2tab[i].lock);
                atomic_set(&d->l2tab[i].refcnt, 0);
                skb_queue_head_init(&d->l2tab[i].arpq);
        }
        return d;
}

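/* debugfs support: dump the software L2T through a seq_file. */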
static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos)
{
        struct l2t_data *d = seq->private;

        return pos >= d->l2t_size ? NULL : &d->l2tab[pos];
}

static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
{
        return *pos ? l2t_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        v = l2t_get_idx(seq, *pos);
        ++(*pos);
        return v;
}

static void l2t_seq_stop(struct seq_file *seq, void *v)
{
}

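/* Map an entry's state to the single character shown in the debugfs dump. */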
static char l2e_state(const struct l2t_entry *e)
{
        switch (e->state) {
        case L2T_STATE_VALID: return 'V';
        case L2T_STATE_STALE: return 'S';
        case L2T_STATE_SYNC_WRITE: return 'W';
        case L2T_STATE_RESOLVING:
                return skb_queue_empty(&e->arpq) ? 'R' : 'A';
        case L2T_STATE_SWITCHING: return 'X';
        default:
                return 'U';
        }
}

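/* Return true if the entry is currently in the VALID state. */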
bool cxgb4_check_l2t_valid(struct l2t_entry *e)
{
        bool valid;

        spin_lock(&e->lock);
        valid = (e->state == L2T_STATE_VALID);
        spin_unlock(&e->lock);
        return valid;
}
EXPORT_SYMBOL(cxgb4_check_l2t_valid);

static int l2t_seq_show(struct seq_file *seq, void *v)
{
        if (v == SEQ_START_TOKEN)
                seq_puts(seq, " Idx IP address                "
                         "Ethernet address  VLAN/P LP State Users Port\n");
        else {
                char ip[60];
                struct l2t_data *d = seq->private;
                struct l2t_entry *e = v;

                spin_lock_bh(&e->lock);
                if (e->state == L2T_STATE_SWITCHING)
                        ip[0] = '\0';
                else
                        sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr);
                seq_printf(seq, "%4u %-25s %17pM %4d %u %2u   %c   %5u %s\n",
                           e->idx + d->l2t_start, ip, e->dmac,
                           e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport,
                           l2e_state(e), atomic_read(&e->refcnt),
                           e->neigh ? e->neigh->dev->name : "");
                spin_unlock_bh(&e->lock);
        }
        return 0;
}

static const struct seq_operations l2t_seq_ops = {
        .start = l2t_seq_start,
        .next = l2t_seq_next,
        .stop = l2t_seq_stop,
        .show = l2t_seq_show
};

static int l2t_seq_open(struct inode *inode, struct file *file)
{
        int rc = seq_open(file, &l2t_seq_ops);

        if (!rc) {
                struct adapter *adap = inode->i_private;
                struct seq_file *seq = file->private_data;

                seq->private = adap->l2t;
        }
        return rc;
}

const struct file_operations t4_l2t_fops = {
        .owner = THIS_MODULE,
        .open = l2t_seq_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};