linux/drivers/net/ethernet/netronome/nfp/flower/metadata.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/hash.h>
#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <linux/math64.h>
#include <linux/vmalloc.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_app.h"

struct nfp_mask_id_table {
	struct hlist_node link;
	u32 hash_key;
	u32 ref_cnt;
	u8 mask_id;
};

struct nfp_fl_flow_table_cmp_arg {
	struct net_device *netdev;
	unsigned long cookie;
};

struct nfp_fl_stats_ctx_to_flow {
	struct rhash_head ht_node;
	u32 stats_cxt;
	struct nfp_fl_payload *flow;
};

static const struct rhashtable_params stats_ctx_table_params = {
	.key_offset	= offsetof(struct nfp_fl_stats_ctx_to_flow, stats_cxt),
	.head_offset	= offsetof(struct nfp_fl_stats_ctx_to_flow, ht_node),
	.key_len	= sizeof(u32),
};

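/* Return a stats context ID to the free-list ring buffer so a later
 * flow can reuse it. Fails with -ENOBUFS if the ring is full.
 */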
static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct circ_buf *ring;

	ring = &priv->stats_ids.free_list;
	/* Check if buffer is full. */
	if (!CIRC_SPACE(ring->head, ring->tail,
			priv->stats_ring_size * NFP_FL_STATS_ELEM_RS -
			NFP_FL_STATS_ELEM_RS + 1))
		return -ENOBUFS;

	memcpy(&ring->buf[ring->head], &stats_context_id, NFP_FL_STATS_ELEM_RS);
	ring->head = (ring->head + NFP_FL_STATS_ELEM_RS) %
		     (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);

	return 0;
}

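/* Hand out a free stats context ID. Previously unallocated IDs are
 * consumed first, rotating through the available host memory units;
 * once exhausted, IDs are recycled from the free-list ring buffer.
 */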
static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
{
	struct nfp_flower_priv *priv = app->priv;
	u32 freed_stats_id, temp_stats_id;
	struct circ_buf *ring;

	ring = &priv->stats_ids.free_list;
	freed_stats_id = priv->stats_ring_size;
	/* Check for unallocated entries first. */
	if (priv->stats_ids.init_unalloc > 0) {
		if (priv->active_mem_unit == priv->total_mem_units) {
			priv->stats_ids.init_unalloc--;
			priv->active_mem_unit = 0;
		}

		*stats_context_id =
			FIELD_PREP(NFP_FL_STAT_ID_STAT,
				   priv->stats_ids.init_unalloc - 1) |
			FIELD_PREP(NFP_FL_STAT_ID_MU_NUM,
				   priv->active_mem_unit);
		priv->active_mem_unit++;
		return 0;
	}

	/* Check if buffer is empty. */
	if (ring->head == ring->tail) {
		*stats_context_id = freed_stats_id;
		return -ENOENT;
	}

	memcpy(&temp_stats_id, &ring->buf[ring->tail], NFP_FL_STATS_ELEM_RS);
	*stats_context_id = temp_stats_id;
	memcpy(&ring->buf[ring->tail], &freed_stats_id, NFP_FL_STATS_ELEM_RS);
	ring->tail = (ring->tail + NFP_FL_STATS_ELEM_RS) %
		     (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);

	return 0;
}

/* Must be called with either RTNL or rcu_read_lock */
struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
			   struct net_device *netdev)
{
	struct nfp_fl_flow_table_cmp_arg flower_cmp_arg;
	struct nfp_flower_priv *priv = app->priv;

	flower_cmp_arg.netdev = netdev;
	flower_cmp_arg.cookie = tc_flower_cookie;

	return rhashtable_lookup_fast(&priv->flow_table, &flower_cmp_arg,
				      nfp_flower_table_params);
}

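/* Process a batch of flow stats updates from the firmware. Each frame
 * carries a stats context ID plus packet and byte counts, which are
 * accumulated into the matching entry of the host stats array.
 */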
void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb)
{
	unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_stats_frame *stats;
	unsigned char *msg;
	u32 ctx_id;
	int i;

	msg = nfp_flower_cmsg_get_data(skb);

	spin_lock(&priv->stats_lock);
	for (i = 0; i < msg_len / sizeof(*stats); i++) {
		stats = (struct nfp_fl_stats_frame *)msg + i;
		ctx_id = be32_to_cpu(stats->stats_con_id);
		priv->stats[ctx_id].pkts += be32_to_cpu(stats->pkt_count);
		priv->stats[ctx_id].bytes += be64_to_cpu(stats->byte_count);
		priv->stats[ctx_id].used = jiffies;
	}
	spin_unlock(&priv->stats_lock);
}

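/* Return a mask ID to the free-list ring buffer and record the release
 * time so the ID is not handed out again before the reuse timeout.
 */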
static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct circ_buf *ring;

	ring = &priv->mask_ids.mask_id_free_list;
	/* Check if buffer is full. */
	if (CIRC_SPACE(ring->head, ring->tail, NFP_FLOWER_MASK_ENTRY_RS) == 0)
		return -ENOBUFS;

	memcpy(&ring->buf[ring->head], &mask_id, NFP_FLOWER_MASK_ELEMENT_RS);
	ring->head = (ring->head + NFP_FLOWER_MASK_ELEMENT_RS) %
		     (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);

	priv->mask_ids.last_used[mask_id] = ktime_get();

	return 0;
}

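/* Allocate a hardware mask ID. Unallocated IDs are used first; a
 * recycled ID is only handed out again once NFP_FL_MASK_REUSE_TIME_NS
 * has elapsed since its release.
 */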
static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id)
{
	struct nfp_flower_priv *priv = app->priv;
	ktime_t reuse_timeout;
	struct circ_buf *ring;
	u8 temp_id, freed_id;

	ring = &priv->mask_ids.mask_id_free_list;
	freed_id = NFP_FLOWER_MASK_ENTRY_RS - 1;
	/* Check for unallocated entries first. */
	if (priv->mask_ids.init_unallocated > 0) {
		*mask_id = priv->mask_ids.init_unallocated;
		priv->mask_ids.init_unallocated--;
		return 0;
	}

	/* Check if buffer is empty. */
	if (ring->head == ring->tail)
		goto err_not_found;

	memcpy(&temp_id, &ring->buf[ring->tail], NFP_FLOWER_MASK_ELEMENT_RS);
	*mask_id = temp_id;

	reuse_timeout = ktime_add_ns(priv->mask_ids.last_used[*mask_id],
				     NFP_FL_MASK_REUSE_TIME_NS);

	if (ktime_before(ktime_get(), reuse_timeout))
		goto err_not_found;

	memcpy(&ring->buf[ring->tail], &freed_id, NFP_FLOWER_MASK_ELEMENT_RS);
	ring->tail = (ring->tail + NFP_FLOWER_MASK_ELEMENT_RS) %
		     (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);

	return 0;

err_not_found:
	*mask_id = freed_id;
	return -ENOENT;
}

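/* Allocate a mask ID for @mask_data and track it in the mask hash
 * table with an initial reference count of one. Returns the new mask
 * ID, or a negative errno on failure.
 */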
static int
nfp_add_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_mask_id_table *mask_entry;
	unsigned long hash_key;
	u8 mask_id;

	if (nfp_mask_alloc(app, &mask_id))
		return -ENOENT;

	mask_entry = kmalloc(sizeof(*mask_entry), GFP_KERNEL);
	if (!mask_entry) {
		nfp_release_mask_id(app, mask_id);
		return -ENOMEM;
	}

	INIT_HLIST_NODE(&mask_entry->link);
	mask_entry->mask_id = mask_id;
	hash_key = jhash(mask_data, mask_len, priv->mask_id_seed);
	mask_entry->hash_key = hash_key;
	mask_entry->ref_cnt = 1;
	hash_add(priv->mask_table, &mask_entry->link, hash_key);

	return mask_id;
}

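/* Look up an existing mask table entry by the jhash of the mask data. */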
static struct nfp_mask_id_table *
nfp_search_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_mask_id_table *mask_entry;
	unsigned long hash_key;

	hash_key = jhash(mask_data, mask_len, priv->mask_id_seed);

	hash_for_each_possible(priv->mask_table, mask_entry, link, hash_key)
		if (mask_entry->hash_key == hash_key)
			return mask_entry;

	return NULL;
}

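/* Find an existing mask entry and take a reference on it. Returns the
 * mask ID, or -ENOENT if the mask is not in the table.
 */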
static int
nfp_find_in_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
	struct nfp_mask_id_table *mask_entry;

	mask_entry = nfp_search_mask_table(app, mask_data, mask_len);
	if (!mask_entry)
		return -ENOENT;

	mask_entry->ref_cnt++;

	/* Casting u8 to int for later use. */
	return mask_entry->mask_id;
}

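/* Get a mask ID for @mask_data, reusing an existing entry when one
 * matches and allocating a new one otherwise. Sets
 * NFP_FL_META_FLAG_MANAGE_MASK when a new mask was allocated, so the
 * firmware is told to manage it.
 */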
static bool
nfp_check_mask_add(struct nfp_app *app, char *mask_data, u32 mask_len,
		   u8 *meta_flags, u8 *mask_id)
{
	int id;

	id = nfp_find_in_mask_table(app, mask_data, mask_len);
	if (id < 0) {
		id = nfp_add_mask_table(app, mask_data, mask_len);
		if (id < 0)
			return false;
		*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
	}
	*mask_id = id;

	return true;
}

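/* Drop a reference on the mask entry for @mask_data. When the last
 * reference goes away, the entry is freed, its ID is returned to the
 * free list and, if @meta_flags is provided, NFP_FL_META_FLAG_MANAGE_MASK
 * is set so the firmware deallocates the mask as well.
 */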
static bool
nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
		      u8 *meta_flags, u8 *mask_id)
{
	struct nfp_mask_id_table *mask_entry;

	mask_entry = nfp_search_mask_table(app, mask_data, mask_len);
	if (!mask_entry)
		return false;

	*mask_id = mask_entry->mask_id;
	mask_entry->ref_cnt--;
	if (!mask_entry->ref_cnt) {
		hash_del(&mask_entry->link);
		nfp_release_mask_id(app, *mask_id);
		kfree(mask_entry);
		if (meta_flags)
			*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
	}

	return true;
}

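/* Populate the metadata of a new flow: allocate a stats context and
 * map it back to the flow for stats lookups, assign a mask ID and a
 * flow version, and reject duplicate cookie/netdev pairs. All
 * allocations are unwound on error.
 */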
int nfp_compile_flow_metadata(struct nfp_app *app,
			      struct flow_cls_offload *flow,
			      struct nfp_fl_payload *nfp_flow,
			      struct net_device *netdev,
			      struct netlink_ext_ack *extack)
{
	struct nfp_fl_stats_ctx_to_flow *ctx_entry;
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *check_entry;
	u8 new_mask_id;
	u32 stats_cxt;
	int err;

	err = nfp_get_stats_entry(app, &stats_cxt);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate new stats context");
		return err;
	}

	nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
	nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
	nfp_flow->ingress_dev = netdev;

	ctx_entry = kzalloc(sizeof(*ctx_entry), GFP_KERNEL);
	if (!ctx_entry) {
		err = -ENOMEM;
		goto err_release_stats;
	}

	ctx_entry->stats_cxt = stats_cxt;
	ctx_entry->flow = nfp_flow;

	if (rhashtable_insert_fast(&priv->stats_ctx_table, &ctx_entry->ht_node,
				   stats_ctx_table_params)) {
		err = -ENOMEM;
		goto err_free_ctx_entry;
	}

	new_mask_id = 0;
	if (!nfp_check_mask_add(app, nfp_flow->mask_data,
				nfp_flow->meta.mask_len,
				&nfp_flow->meta.flags, &new_mask_id)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate a new mask id");
		if (nfp_release_stats_entry(app, stats_cxt)) {
			NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release stats context");
			err = -EINVAL;
			goto err_remove_rhash;
		}
		err = -ENOENT;
		goto err_remove_rhash;
	}

	nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
	priv->flower_version++;

	/* Update flow payload with mask ids. */
	nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
	priv->stats[stats_cxt].pkts = 0;
	priv->stats[stats_cxt].bytes = 0;
	priv->stats[stats_cxt].used = jiffies;

	check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (check_entry) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot offload duplicate flow entry");
		if (nfp_release_stats_entry(app, stats_cxt)) {
			NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release stats context");
			err = -EINVAL;
			goto err_remove_mask;
		}

		if (!nfp_check_mask_remove(app, nfp_flow->mask_data,
					   nfp_flow->meta.mask_len,
					   NULL, &new_mask_id)) {
			NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release mask id");
			err = -EINVAL;
			goto err_remove_mask;
		}

		err = -EEXIST;
		goto err_remove_mask;
	}

	return 0;

err_remove_mask:
	nfp_check_mask_remove(app, nfp_flow->mask_data, nfp_flow->meta.mask_len,
			      NULL, &new_mask_id);
err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
					    &ctx_entry->ht_node,
					    stats_ctx_table_params));
err_free_ctx_entry:
	kfree(ctx_entry);
err_release_stats:
	nfp_release_stats_entry(app, stats_cxt);

	return err;
}

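/* Bump the flow version and clear the mask-management flag before a
 * modified flow is re-offloaded.
 */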
void __nfp_modify_flow_metadata(struct nfp_flower_priv *priv,
				struct nfp_fl_payload *nfp_flow)
{
	nfp_flow->meta.flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;
	nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
	priv->flower_version++;
}

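/* Release the metadata held by a flow: drop its mask reference, remove
 * the stats-context-to-flow mapping and return the stats context ID to
 * the free list.
 */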
int nfp_modify_flow_metadata(struct nfp_app *app,
			     struct nfp_fl_payload *nfp_flow)
{
	struct nfp_fl_stats_ctx_to_flow *ctx_entry;
	struct nfp_flower_priv *priv = app->priv;
	u8 new_mask_id = 0;
	u32 temp_ctx_id;

	__nfp_modify_flow_metadata(priv, nfp_flow);

	nfp_check_mask_remove(app, nfp_flow->mask_data,
			      nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
			      &new_mask_id);

	/* Update flow payload with mask ids. */
	nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;

	/* Release the stats ctx id and ctx to flow table entry. */
	temp_ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

	ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &temp_ctx_id,
					   stats_ctx_table_params);
	if (!ctx_entry)
		return -ENOENT;

	WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
					    &ctx_entry->ht_node,
					    stats_ctx_table_params));
	kfree(ctx_entry);

	return nfp_release_stats_entry(app, temp_ctx_id);
}

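/* Map a stats context ID back to its flow payload, if one exists. */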
struct nfp_fl_payload *
nfp_flower_get_fl_payload_from_ctx(struct nfp_app *app, u32 ctx_id)
{
	struct nfp_fl_stats_ctx_to_flow *ctx_entry;
	struct nfp_flower_priv *priv = app->priv;

	ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &ctx_id,
					   stats_ctx_table_params);
	if (!ctx_entry)
		return NULL;

	return ctx_entry->flow;
}

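/* rhashtable callbacks for the flow table: entries hash on the TC
 * flower cookie, but a match also requires the same ingress netdev.
 */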
static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
			    const void *obj)
{
	const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key;
	const struct nfp_fl_payload *flow_entry = obj;

	if (flow_entry->ingress_dev == cmp_arg->netdev)
		return flow_entry->tc_flower_cookie != cmp_arg->cookie;

	return 1;
}

static u32 nfp_fl_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct nfp_fl_payload *flower_entry = data;

	return jhash2((u32 *)&flower_entry->tc_flower_cookie,
		      sizeof(flower_entry->tc_flower_cookie) / sizeof(u32),
		      seed);
}

static u32 nfp_fl_key_hashfn(const void *data, u32 len, u32 seed)
{
	const struct nfp_fl_flow_table_cmp_arg *cmp_arg = data;

	return jhash2((u32 *)&cmp_arg->cookie,
		      sizeof(cmp_arg->cookie) / sizeof(u32), seed);
}

const struct rhashtable_params nfp_flower_table_params = {
	.head_offset		= offsetof(struct nfp_fl_payload, fl_node),
	.hashfn			= nfp_fl_key_hashfn,
	.obj_cmpfn		= nfp_fl_obj_cmpfn,
	.obj_hashfn		= nfp_fl_obj_hashfn,
	.automatic_shrinking	= true,
};

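/* Set up the metadata state: the flow and stats-context rhashtables,
 * the mask ID free list and last-used timestamps, the stats ID free
 * list and the per-context stats array covering all host memory units.
 */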
int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
			     unsigned int host_num_mems)
{
	struct nfp_flower_priv *priv = app->priv;
	int err, stats_size;

	hash_init(priv->mask_table);

	err = rhashtable_init(&priv->flow_table, &nfp_flower_table_params);
	if (err)
		return err;

	err = rhashtable_init(&priv->stats_ctx_table, &stats_ctx_table_params);
	if (err)
		goto err_free_flow_table;

	get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));

	/* Init ring buffer and unallocated mask_ids. */
	priv->mask_ids.mask_id_free_list.buf =
		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
			      NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
	if (!priv->mask_ids.mask_id_free_list.buf)
		goto err_free_stats_ctx_table;

	priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;

	/* Init timestamps for mask ids. */
	priv->mask_ids.last_used =
		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
			      sizeof(*priv->mask_ids.last_used), GFP_KERNEL);
	if (!priv->mask_ids.last_used)
		goto err_free_mask_id;

	/* Init ring buffer and unallocated stats_ids. */
	priv->stats_ids.free_list.buf =
		vmalloc(array_size(NFP_FL_STATS_ELEM_RS,
				   priv->stats_ring_size));
	if (!priv->stats_ids.free_list.buf)
		goto err_free_last_used;

	priv->stats_ids.init_unalloc = div_u64(host_ctx_count, host_num_mems);

	stats_size = FIELD_PREP(NFP_FL_STAT_ID_STAT, host_ctx_count) |
		     FIELD_PREP(NFP_FL_STAT_ID_MU_NUM, host_num_mems - 1);
	priv->stats = kvmalloc_array(stats_size, sizeof(struct nfp_fl_stats),
				     GFP_KERNEL);
	if (!priv->stats)
		goto err_free_ring_buf;

	spin_lock_init(&priv->stats_lock);

	return 0;

err_free_ring_buf:
	vfree(priv->stats_ids.free_list.buf);
err_free_last_used:
	kfree(priv->mask_ids.last_used);
err_free_mask_id:
	kfree(priv->mask_ids.mask_id_free_list.buf);
err_free_stats_ctx_table:
	rhashtable_destroy(&priv->stats_ctx_table);
err_free_flow_table:
	rhashtable_destroy(&priv->flow_table);
	return -ENOMEM;
}

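/* Tear down everything allocated by nfp_flower_metadata_init(). */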
void nfp_flower_metadata_cleanup(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!priv)
		return;

	rhashtable_free_and_destroy(&priv->flow_table,
				    nfp_check_rhashtable_empty, NULL);
	rhashtable_free_and_destroy(&priv->stats_ctx_table,
				    nfp_check_rhashtable_empty, NULL);
	kvfree(priv->stats);
	kfree(priv->mask_ids.mask_id_free_list.buf);
	kfree(priv->mask_ids.last_used);
	vfree(priv->stats_ids.free_list.buf);
}