linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/slab.h>

#include "spectrum_cnt.h"

#define MLXSW_SP_COUNTER_POOL_BANK_SIZE 4096

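/* A sub-pool is a statically sized slice of the device counter pool.
 * base_index and size are in counter pool entries; entry_size is the
 * number of pool entries consumed by a single counter of this type.
 */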
struct mlxsw_sp_counter_sub_pool {
        unsigned int base_index;
        unsigned int size;
        unsigned int entry_size;
        unsigned int bank_count;
};

struct mlxsw_sp_counter_pool {
        unsigned int pool_size;
        unsigned long *usage; /* Usage bitmap */
        struct mlxsw_sp_counter_sub_pool *sub_pools;
};

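/* Static bank partitioning of the counter pool: flow counters get six
 * banks, router interface (RIF) counters get two. The remaining fields
 * are filled in during pool initialization.
 */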
static struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = {
        [MLXSW_SP_COUNTER_SUB_POOL_FLOW] = {
                .bank_count = 6,
        },
        [MLXSW_SP_COUNTER_SUB_POOL_RIF] = {
                .bank_count = 2,
        }
};

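/* Make sure the statically configured bank counts fit within the counter
 * pool advertised by the device; the last bank is allowed to be backed
 * only partially.
 */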
static int mlxsw_sp_counter_pool_validate(struct mlxsw_sp *mlxsw_sp)
{
        unsigned int total_bank_config = 0;
        unsigned int pool_size;
        int i;

        pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
        /* Check config is valid, no bank oversubscription */
        for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++)
                total_bank_config += mlxsw_sp_counter_sub_pools[i].bank_count;
        if (total_bank_config > pool_size / MLXSW_SP_COUNTER_POOL_BANK_SIZE + 1)
                return -EINVAL;
        return 0;
}

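/* Read the per-type counter entry sizes from device resources. Each
 * allocated counter occupies entry_size consecutive pool entries.
 */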
static int mlxsw_sp_counter_sub_pools_prepare(struct mlxsw_sp *mlxsw_sp)
{
        struct mlxsw_sp_counter_sub_pool *sub_pool;

        /* Prepare generic flow pool */
        sub_pool = &mlxsw_sp_counter_sub_pools[MLXSW_SP_COUNTER_SUB_POOL_FLOW];
        if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_SIZE_PACKETS_BYTES))
                return -EIO;
        sub_pool->entry_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
                                                  COUNTER_SIZE_PACKETS_BYTES);
        /* Prepare erif pool */
        sub_pool = &mlxsw_sp_counter_sub_pools[MLXSW_SP_COUNTER_SUB_POOL_RIF];
        if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_SIZE_ROUTER_BASIC))
                return -EIO;
        sub_pool->entry_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
                                                  COUNTER_SIZE_ROUTER_BASIC);
        return 0;
}

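/* Initialize counter pool bookkeeping: validate the device resources and
 * the static bank configuration, then allocate the usage bitmap and carve
 * the per-type sub-pool ranges out of the pool.
 */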
int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
{
        struct mlxsw_sp_counter_sub_pool *sub_pool;
        struct mlxsw_sp_counter_pool *pool;
        unsigned int base_index;
        unsigned int map_size;
        int i;
        int err;

        if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_POOL_SIZE))
                return -EIO;

        err = mlxsw_sp_counter_pool_validate(mlxsw_sp);
        if (err)
                return err;

        err = mlxsw_sp_counter_sub_pools_prepare(mlxsw_sp);
        if (err)
                return err;

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                return -ENOMEM;

        pool->pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
        map_size = BITS_TO_LONGS(pool->pool_size) * sizeof(unsigned long);

        pool->usage = kzalloc(map_size, GFP_KERNEL);
        if (!pool->usage) {
                err = -ENOMEM;
                goto err_usage_alloc;
        }

        pool->sub_pools = mlxsw_sp_counter_sub_pools;
        /* Sub-pool carving is based on the bank count, which is
         * specified statically for each sub-pool.
         */
        base_index = 0;
        for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++) {
                sub_pool = &pool->sub_pools[i];
                sub_pool->size = sub_pool->bank_count *
                                 MLXSW_SP_COUNTER_POOL_BANK_SIZE;
                sub_pool->base_index = base_index;
                base_index += sub_pool->size;
                /* The last sub-pool is truncated in case the pool does not
                 * cover all of its banks.
                 */
                if (sub_pool->base_index + sub_pool->size > pool->pool_size)
                        sub_pool->size = pool->pool_size - sub_pool->base_index;
        }

        mlxsw_sp->counter_pool = pool;
        return 0;

err_usage_alloc:
        kfree(pool);
        return err;
}

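/* Destroy the counter pool. All counters are expected to have been freed
 * by now; a bit still set in the usage bitmap indicates a leaked counter.
 */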
void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp)
{
        struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;

        WARN_ON(find_first_bit(pool->usage, pool->pool_size) !=
                               pool->pool_size);
        kfree(pool->usage);
        kfree(pool);
}

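/* Allocate a counter from the given sub-pool: find a free entry in the
 * sub-pool's slice of the usage bitmap and mark entry_size consecutive
 * bits as used. Returns 0 and sets *p_counter_index on success, -ENOBUFS
 * when the sub-pool is exhausted.
 */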
int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
                           enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
                           unsigned int *p_counter_index)
{
        struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
        struct mlxsw_sp_counter_sub_pool *sub_pool;
        unsigned int entry_index;
        unsigned int stop_index;
        int i;

        sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
        stop_index = sub_pool->base_index + sub_pool->size;
        entry_index = sub_pool->base_index;

        entry_index = find_next_zero_bit(pool->usage, stop_index, entry_index);
        if (entry_index == stop_index)
                return -ENOBUFS;
        /* The sub-pool size is not necessarily a multiple of the entry size,
         * so make sure the entry does not spill past the end of the sub-pool.
         */
        if (entry_index + sub_pool->entry_size > stop_index)
                return -ENOBUFS;
        for (i = 0; i < sub_pool->entry_size; i++)
                __set_bit(entry_index + i, pool->usage);

        *p_counter_index = entry_index;
        return 0;
}

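/* Return a previously allocated counter to its sub-pool by clearing its
 * entry_size consecutive bits in the usage bitmap.
 */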
void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
                           enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
                           unsigned int counter_index)
{
        struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
        struct mlxsw_sp_counter_sub_pool *sub_pool;
        int i;

        if (WARN_ON(counter_index >= pool->pool_size))
                return;
        sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
        for (i = 0; i < sub_pool->entry_size; i++)
                __clear_bit(counter_index + i, pool->usage);
}
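
/* Example usage by a hypothetical caller, for illustration only (not part
 * of the driver): allocate a flow counter index and release it later.
 *
 *	unsigned int counter_index;
 *	int err;
 *
 *	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
 *	                             &counter_index);
 *	if (err)
 *		return err;
 *	...
 *	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
 *	                      counter_index);
 */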