/* linux/drivers/net/ethernet/cisco/enic/enic_clsf.c */
   1#include <linux/if.h>
   2#include <linux/if_ether.h>
   3#include <linux/if_link.h>
   4#include <linux/netdevice.h>
   5#include <linux/in.h>
   6#include <linux/types.h>
   7#include <linux/skbuff.h>
   8#include <net/flow_dissector.h>
   9#include "enic_res.h"
  10#include "enic_clsf.h"
  11
  12/* enic_addfltr_5t - Add ipv4 5tuple filter
  13 *      @enic: enic struct of vnic
  14 *      @keys: flow_keys of ipv4 5tuple
  15 *      @rq: rq number to steer to
  16 *
  17 * This function returns filter_id(hardware_id) of the filter
  18 * added. In case of error it returns a negative number.
  19 */
  20int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq)
  21{
  22        int res;
  23        struct filter data;
  24
  25        switch (keys->basic.ip_proto) {
  26        case IPPROTO_TCP:
  27                data.u.ipv4.protocol = PROTO_TCP;
  28                break;
  29        case IPPROTO_UDP:
  30                data.u.ipv4.protocol = PROTO_UDP;
  31                break;
  32        default:
  33                return -EPROTONOSUPPORT;
  34        };
  35        data.type = FILTER_IPV4_5TUPLE;
  36        data.u.ipv4.src_addr = ntohl(keys->addrs.v4addrs.src);
  37        data.u.ipv4.dst_addr = ntohl(keys->addrs.v4addrs.dst);
  38        data.u.ipv4.src_port = ntohs(keys->ports.src);
  39        data.u.ipv4.dst_port = ntohs(keys->ports.dst);
  40        data.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
  41
  42        spin_lock_bh(&enic->devcmd_lock);
  43        res = vnic_dev_classifier(enic->vdev, CLSF_ADD, &rq, &data);
  44        spin_unlock_bh(&enic->devcmd_lock);
  45        res = (res == 0) ? rq : res;
  46
  47        return res;
  48}
  49
  50/* enic_delfltr - Delete clsf filter
  51 *      @enic: enic struct of vnic
  52 *      @filter_id: filter_is(hardware_id) of filter to be deleted
  53 *
  54 * This function returns zero in case of success, negative number incase of
  55 * error.
  56 */
  57int enic_delfltr(struct enic *enic, u16 filter_id)
  58{
  59        int ret;
  60
  61        spin_lock_bh(&enic->devcmd_lock);
  62        ret = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL);
  63        spin_unlock_bh(&enic->devcmd_lock);
  64
  65        return ret;
  66}
  67
  68/* enic_rfs_flw_tbl_init - initialize enic->rfs_h members
  69 *      @enic: enic data
  70 */
  71void enic_rfs_flw_tbl_init(struct enic *enic)
  72{
  73        int i;
  74
  75        spin_lock_init(&enic->rfs_h.lock);
  76        for (i = 0; i <= ENIC_RFS_FLW_MASK; i++)
  77                INIT_HLIST_HEAD(&enic->rfs_h.ht_head[i]);
  78        enic->rfs_h.max = enic->config.num_arfs;
  79        enic->rfs_h.free = enic->rfs_h.max;
  80        enic->rfs_h.toclean = 0;
  81}
  82
  83void enic_rfs_flw_tbl_free(struct enic *enic)
  84{
  85        int i;
  86
  87        enic_rfs_timer_stop(enic);
  88        spin_lock_bh(&enic->rfs_h.lock);
  89        for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
  90                struct hlist_head *hhead;
  91                struct hlist_node *tmp;
  92                struct enic_rfs_fltr_node *n;
  93
  94                hhead = &enic->rfs_h.ht_head[i];
  95                hlist_for_each_entry_safe(n, tmp, hhead, node) {
  96                        enic_delfltr(enic, n->fltr_id);
  97                        hlist_del(&n->node);
  98                        kfree(n);
  99                        enic->rfs_h.free++;
 100                }
 101        }
 102        spin_unlock_bh(&enic->rfs_h.lock);
 103}
 104
 105struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id)
 106{
 107        int i;
 108
 109        for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
 110                struct hlist_head *hhead;
 111                struct hlist_node *tmp;
 112                struct enic_rfs_fltr_node *n;
 113
 114                hhead = &enic->rfs_h.ht_head[i];
 115                hlist_for_each_entry_safe(n, tmp, hhead, node)
 116                        if (n->fltr_id == fltr_id)
 117                                return n;
 118        }
 119
 120        return NULL;
 121}
 122
 123#ifdef CONFIG_RFS_ACCEL
/* enic_flow_may_expire - timer callback that ages out stale aRFS filters
 *	@data: the enic pointer, cast to unsigned long (pre-4.15 timer API)
 *
 * Scans ENIC_CLSF_EXPIRE_COUNT hash buckets per run.  For each filter,
 * asks the RPS core whether the flow may expire; if so, deletes the
 * hardware filter and frees the node.  Re-arms itself to run again in
 * HZ/4 jiffies.
 */
void enic_flow_may_expire(unsigned long data)
{
	struct enic *enic = (struct enic *)data;
	bool res;
	int j;

	spin_lock_bh(&enic->rfs_h.lock);
	for (j = 0; j < ENIC_CLSF_EXPIRE_COUNT; j++) {
		struct hlist_head *hhead;
		struct hlist_node *tmp;
		struct enic_rfs_fltr_node *n;

		/* NOTE(review): toclean++ is not masked here, so this
		 * index only stays in bounds if toclean is declared
		 * narrow enough (e.g. an ENIC_RFS_FLW_BITSHIFT-bit
		 * bitfield) to wrap at the table size -- confirm
		 * against the rfs_h struct definition.
		 */
		hhead = &enic->rfs_h.ht_head[enic->rfs_h.toclean++];
		hlist_for_each_entry_safe(n, tmp, hhead, node) {
			res = rps_may_expire_flow(enic->netdev, n->rq_id,
						  n->flow_id, n->fltr_id);
			if (res) {
				res = enic_delfltr(enic, n->fltr_id);
				/* hardware delete failed: keep the node
				 * so a later pass can retry
				 */
				if (unlikely(res))
					continue;
				hlist_del(&n->node);
				kfree(n);
				enic->rfs_h.free++;
			}
		}
	}
	spin_unlock_bh(&enic->rfs_h.lock);
	/* re-arm to scan the next batch of buckets in 1/4 second */
	mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
}
 153
 154static struct enic_rfs_fltr_node *htbl_key_search(struct hlist_head *h,
 155                                                  struct flow_keys *k)
 156{
 157        struct enic_rfs_fltr_node *tpos;
 158
 159        hlist_for_each_entry(tpos, h, node)
 160                if (tpos->keys.addrs.v4addrs.src == k->addrs.v4addrs.src &&
 161                    tpos->keys.addrs.v4addrs.dst == k->addrs.v4addrs.dst &&
 162                    tpos->keys.ports.ports == k->ports.ports &&
 163                    tpos->keys.basic.ip_proto == k->basic.ip_proto &&
 164                    tpos->keys.basic.n_proto == k->basic.n_proto)
 165                        return tpos;
 166        return NULL;
 167}
 168
/* enic_rx_flow_steer - ndo_rx_flow_steer callback (aRFS)
 *	@dev: net device
 *	@skb: packet belonging to the flow to steer
 *	@rxq_index: desired rx queue for this flow
 *	@flow_id: flow id assigned by the RPS core
 *
 * Adds (or moves) an ipv4 TCP/UDP 5-tuple hardware filter so that
 * subsequent packets of this flow are delivered to @rxq_index.  Returns
 * the hardware filter id on success, a negative errno on failure.
 * rfs_h.free tracks remaining filter-table capacity and must balance
 * across every exit path below.
 */
int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
		       u16 rxq_index, u32 flow_id)
{
	struct flow_keys keys;
	struct enic_rfs_fltr_node *n;
	struct enic *enic;
	u16 tbl_idx;
	int res, i;

	enic = netdev_priv(dev);
	/* only ipv4 TCP/UDP can be expressed as a 5-tuple filter */
	res = skb_flow_dissect_flow_keys(skb, &keys, 0);
	if (!res || keys.basic.n_proto != htons(ETH_P_IP) ||
	    (keys.basic.ip_proto != IPPROTO_TCP &&
	     keys.basic.ip_proto != IPPROTO_UDP))
		return -EPROTONOSUPPORT;

	tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK;
	spin_lock_bh(&enic->rfs_h.lock);
	n = htbl_key_search(&enic->rfs_h.ht_head[tbl_idx], &keys);

	if (n) { /* entry already present  */
		if (rxq_index == n->rq_id) {
			res = -EEXIST;
			goto ret_unlock;
		}

		/* desired rq changed for the flow, we need to delete
		 * old fltr and add new one
		 *
		 * The moment we delete the fltr, the upcoming pkts
		 * are put it default rq based on rss. When we add
		 * new filter, upcoming pkts are put in desired queue.
		 * This could cause ooo pkts.
		 *
		 * Lets 1st try adding new fltr and then del old one.
		 */
		i = --enic->rfs_h.free;
		/* clsf tbl is full, we have to del old fltr first*/
		if (unlikely(i < 0)) {
			enic->rfs_h.free++;
			res = enic_delfltr(enic, n->fltr_id);
			if (unlikely(res < 0))
				goto ret_unlock;
			res = enic_addfltr_5t(enic, &keys, rxq_index);
			if (res < 0) {
				/* old filter already gone and the new add
				 * failed: drop the node entirely
				 */
				hlist_del(&n->node);
				enic->rfs_h.free++;
				goto ret_unlock;
			}
		/* add new fltr 1st then del old fltr */
		} else {
			int ret;

			res = enic_addfltr_5t(enic, &keys, rxq_index);
			if (res < 0) {
				enic->rfs_h.free++;
				goto ret_unlock;
			}
			ret = enic_delfltr(enic, n->fltr_id);
			/* deleting old fltr failed. Add old fltr to list.
			 * enic_flow_may_expire() will try to delete it later.
			 */
			if (unlikely(ret < 0)) {
				struct enic_rfs_fltr_node *d;
				struct hlist_head *head;

				/* NOTE(review): only fltr_id is copied into
				 * the placeholder node; rq_id and flow_id are
				 * left uninitialized yet are later read by
				 * rps_may_expire_flow() in
				 * enic_flow_may_expire() -- verify this is
				 * intended.
				 */
				head = &enic->rfs_h.ht_head[tbl_idx];
				d = kmalloc(sizeof(*d), GFP_ATOMIC);
				if (d) {
					d->fltr_id = n->fltr_id;
					INIT_HLIST_NODE(&d->node);
					hlist_add_head(&d->node, head);
				}
			} else {
				enic->rfs_h.free++;
			}
		}
		/* re-point the existing node at the new hw filter/queue */
		n->rq_id = rxq_index;
		n->fltr_id = res;
		n->flow_id = flow_id;
	/* entry not present */
	} else {
		i = --enic->rfs_h.free;
		if (i <= 0) {
			enic->rfs_h.free++;
			res = -EBUSY;
			goto ret_unlock;
		}

		n = kmalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			res = -ENOMEM;
			enic->rfs_h.free++;
			goto ret_unlock;
		}

		res = enic_addfltr_5t(enic, &keys, rxq_index);
		if (res < 0) {
			kfree(n);
			enic->rfs_h.free++;
			goto ret_unlock;
		}
		n->rq_id = rxq_index;
		n->fltr_id = res;
		n->flow_id = flow_id;
		n->keys = keys;
		INIT_HLIST_NODE(&n->node);
		hlist_add_head(&n->node, &enic->rfs_h.ht_head[tbl_idx]);
	}

ret_unlock:
	spin_unlock_bh(&enic->rfs_h.lock);
	return res;
}
 283
 284#endif /* CONFIG_RFS_ACCEL */
 285