linux/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/rbtree.h>
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"

#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)

/* locking scheme:
 *
 * It is the responsibility of the user to prevent concurrent calls or bad
 * ordering to mlx5_fc_create(), mlx5_fc_destroy() and accessing a reference
 * to struct mlx5_fc.
 * e.g. en_tc.c is protected by the RTNL lock of its caller, and will never
 * dump (access struct mlx5_fc) after a counter is destroyed.
 *
 * access to counter list:
 * - create (user context)
 *   - mlx5_fc_create() only adds to an addlist to be used by
 *     mlx5_fc_stats_work(). addlist is protected by a spinlock.
 *   - the work is scheduled to do the actual add
 *
 * - destroy (user context)
 *   - mark a counter as deleted
 *   - the work is scheduled to do the actual del
 *
 * - dump (user context)
 *   user should not call dump after destroy
 *
 * - query (single thread workqueue context)
 *   destroy/dump - no conflict (see destroy)
 *   query/dump - packets and bytes might be inconsistent (since update is not
 *                atomic)
 *   query/create - no conflict (see create)
 *   since every create/destroy schedules the work, the work will only query
 *   the hardware after the necessary time has elapsed.
 */

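/* Insert a counter into the stats rb-tree, keyed by hardware counter id.
 * Keeping the tree sorted by id lets the stats work walk counters in id
 * order and batch contiguous ranges into a single bulk query.
 */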
static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter)
{
        struct rb_node **new = &root->rb_node;
        struct rb_node *parent = NULL;

        while (*new) {
                struct mlx5_fc *this = container_of(*new, struct mlx5_fc, node);
                int result = counter->id - this->id;

                parent = *new;
                if (result < 0)
                        new = &((*new)->rb_left);
                else
                        new = &((*new)->rb_right);
        }

        /* Add new node and rebalance tree. */
        rb_link_node(&counter->node, parent, new);
        rb_insert_color(&counter->node, root);
}

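/* Query a contiguous range of counters [first->id, last_id] from hardware
 * with one bulk command and update each counter's cache on change.  If the
 * range exceeds the device's bulk limit, only a prefix is queried.  Returns
 * the first node not covered by this bulk (so the caller can loop), or NULL
 * once the whole range has been queried or on error.
 */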
static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
                                           struct mlx5_fc *first,
                                           u16 last_id)
{
        struct mlx5_cmd_fc_bulk *b;
        struct rb_node *node = NULL;
        u16 afirst_id;
        int num;
        int err;
        int max_bulk = 1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk);

        /* first id must be aligned to 4 when using bulk query */
        afirst_id = first->id & ~0x3;

        /* number of counters to query, including the last counter */
        num = ALIGN(last_id - afirst_id + 1, 4);
        if (num > max_bulk) {
                num = max_bulk;
                last_id = afirst_id + num - 1;
        }

        b = mlx5_cmd_fc_bulk_alloc(dev, afirst_id, num);
        if (!b) {
                mlx5_core_err(dev, "Error allocating resources for bulk query\n");
                return NULL;
        }

        err = mlx5_cmd_fc_bulk_query(dev, b);
        if (err) {
                mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
                goto out;
        }

        for (node = &first->node; node; node = rb_next(node)) {
                struct mlx5_fc *counter = rb_entry(node, struct mlx5_fc, node);
                struct mlx5_fc_cache *c = &counter->cache;
                u64 packets;
                u64 bytes;

                if (counter->id > last_id)
                        break;

                mlx5_cmd_fc_bulk_get(dev, b,
                                     counter->id, &packets, &bytes);

                if (c->packets == packets)
                        continue;

                c->packets = packets;
                c->bytes = bytes;
                c->lastuse = jiffies;
        }

out:
        mlx5_cmd_fc_bulk_free(b);

        return node;
}

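/* Periodic work, running on a single-threaded workqueue: splice newly
 * created counters from the addlist into the rb-tree, release counters
 * marked as deleted, and, once MLX5_FC_STATS_PERIOD has elapsed, refresh
 * the cached stats of all remaining counters through bulk queries.
 * Re-arms itself as long as any counter exists.
 */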
static void mlx5_fc_stats_work(struct work_struct *work)
{
        struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
                                                 priv.fc_stats.work.work);
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
        unsigned long now = jiffies;
        struct mlx5_fc *counter = NULL;
        struct mlx5_fc *last = NULL;
        struct rb_node *node;
        LIST_HEAD(tmplist);

        spin_lock(&fc_stats->addlist_lock);

        list_splice_tail_init(&fc_stats->addlist, &tmplist);

        if (!list_empty(&tmplist) || !RB_EMPTY_ROOT(&fc_stats->counters))
                queue_delayed_work(fc_stats->wq, &fc_stats->work,
                                   MLX5_FC_STATS_PERIOD);

        spin_unlock(&fc_stats->addlist_lock);

        list_for_each_entry(counter, &tmplist, list)
                mlx5_fc_stats_insert(&fc_stats->counters, counter);

        node = rb_first(&fc_stats->counters);
        while (node) {
                counter = rb_entry(node, struct mlx5_fc, node);

                node = rb_next(node);

                if (counter->deleted) {
                        rb_erase(&counter->node, &fc_stats->counters);

                        mlx5_cmd_fc_free(dev, counter->id);

                        kfree(counter);
                        continue;
                }

                last = counter;
        }

        if (time_before(now, fc_stats->next_query) || !last)
                return;

        node = rb_first(&fc_stats->counters);
        while (node) {
                counter = rb_entry(node, struct mlx5_fc, node);

                node = mlx5_fc_stats_query(dev, counter, last->id);
        }

        fc_stats->next_query = now + MLX5_FC_STATS_PERIOD;
}

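/* Allocate a hardware flow counter.  With aging enabled the counter is
 * handed to the stats work through the addlist, so its packet/byte cache
 * is refreshed periodically.  A typical user (e.g. en_tc.c) creates with
 * aging set, reads deltas via mlx5_fc_query_cached(), then destroys the
 * counter with mlx5_fc_destroy().
 */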
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
        struct mlx5_fc *counter;
        int err;

        counter = kzalloc(sizeof(*counter), GFP_KERNEL);
        if (!counter)
                return ERR_PTR(-ENOMEM);

        err = mlx5_cmd_fc_alloc(dev, &counter->id);
        if (err)
                goto err_out;

        if (aging) {
                counter->cache.lastuse = jiffies;
                counter->aging = true;

                spin_lock(&fc_stats->addlist_lock);
                list_add(&counter->list, &fc_stats->addlist);
                spin_unlock(&fc_stats->addlist_lock);

                mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
        }

        return counter;

err_out:
        kfree(counter);

        return ERR_PTR(err);
}

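/* Release a flow counter.  Aged counters are only marked as deleted here;
 * the removal from the rb-tree and the hardware free happen in the stats
 * work, so the freeing is serialized with bulk queries on the same
 * single-threaded workqueue.
 */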
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

        if (!counter)
                return;

        if (counter->aging) {
                counter->deleted = true;
                mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
                return;
        }

        mlx5_cmd_fc_free(dev, counter->id);
        kfree(counter);
}

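/* Set up the counter bookkeeping: empty rb-tree and addlist, plus a
 * single-threaded workqueue so add, delete and query handling never run
 * concurrently.
 */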
int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

        fc_stats->counters = RB_ROOT;
        INIT_LIST_HEAD(&fc_stats->addlist);
        spin_lock_init(&fc_stats->addlist_lock);

        fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
        if (!fc_stats->wq)
                return -ENOMEM;

        INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);

        return 0;
}

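/* Tear down the stats machinery: stop the work, then free every counter
 * still sitting on the addlist or in the rb-tree, both in software and in
 * hardware.
 */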
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
        struct mlx5_fc *counter;
        struct mlx5_fc *tmp;
        struct rb_node *node;

        cancel_delayed_work_sync(&dev->priv.fc_stats.work);
        destroy_workqueue(dev->priv.fc_stats.wq);
        dev->priv.fc_stats.wq = NULL;

        list_for_each_entry_safe(counter, tmp, &fc_stats->addlist, list) {
                list_del(&counter->list);

                mlx5_cmd_fc_free(dev, counter->id);

                kfree(counter);
        }

        node = rb_first(&fc_stats->counters);
        while (node) {
                counter = rb_entry(node, struct mlx5_fc, node);

                node = rb_next(node);

                rb_erase(&counter->node, &fc_stats->counters);

                mlx5_cmd_fc_free(dev, counter->id);

                kfree(counter);
        }
}

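/* Read the cached stats of an aged counter as a delta since the previous
 * call, along with the last time the hardware saw traffic on it.  The
 * cache is read without locking, so packets and bytes may be mutually
 * inconsistent if a bulk query update races with this call (see the
 * locking scheme above).
 */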
void mlx5_fc_query_cached(struct mlx5_fc *counter,
                          u64 *bytes, u64 *packets, u64 *lastuse)
{
        struct mlx5_fc_cache c;

        c = counter->cache;

        *bytes = c.bytes - counter->lastbytes;
        *packets = c.packets - counter->lastpackets;
        *lastuse = c.lastuse;

        counter->lastbytes = c.bytes;
        counter->lastpackets = c.packets;
}