linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/parman.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"

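/* TCAM-wide bookkeeping, used as the priv area of mlxsw_sp_acl_tcam_ops:
 * bitmaps tracking which hardware region and group IDs are in use, and the
 * limits read from the device resources at init time.
 */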
struct mlxsw_sp_acl_tcam {
        unsigned long *used_regions; /* bit array */
        unsigned int max_regions;
        unsigned long *used_groups;  /* bit array */
        unsigned int max_groups;
        unsigned int max_group_size;
};

static int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
{
        struct mlxsw_sp_acl_tcam *tcam = priv;
        u64 max_tcam_regions;
        u64 max_regions;
        u64 max_groups;
        size_t alloc_size;
        int err;

        max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
                                              ACL_MAX_TCAM_REGIONS);
        max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);

        /* Use 1:1 mapping between ACL region and TCAM region */
        if (max_tcam_regions < max_regions)
                max_regions = max_tcam_regions;

        alloc_size = sizeof(tcam->used_regions[0]) * BITS_TO_LONGS(max_regions);
        tcam->used_regions = kzalloc(alloc_size, GFP_KERNEL);
        if (!tcam->used_regions)
                return -ENOMEM;
        tcam->max_regions = max_regions;

        max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
        alloc_size = sizeof(tcam->used_groups[0]) * BITS_TO_LONGS(max_groups);
        tcam->used_groups = kzalloc(alloc_size, GFP_KERNEL);
        if (!tcam->used_groups) {
                err = -ENOMEM;
                goto err_alloc_used_groups;
        }
        tcam->max_groups = max_groups;
        tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
                                                 ACL_MAX_GROUP_SIZE);
        return 0;

err_alloc_used_groups:
        kfree(tcam->used_regions);
        return err;
}

static void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
{
        struct mlxsw_sp_acl_tcam *tcam = priv;

        kfree(tcam->used_groups);
        kfree(tcam->used_regions);
}

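/* Region and group IDs are allocated from the bitmaps above using a simple
 * first-free-bit search; -ENOBUFS is returned once the device limit is hit.
 */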
static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
                                           u16 *p_id)
{
        u16 id;

        id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
        if (id < tcam->max_regions) {
                __set_bit(id, tcam->used_regions);
                *p_id = id;
                return 0;
        }
        return -ENOBUFS;
}

static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
                                            u16 id)
{
        __clear_bit(id, tcam->used_regions);
}

static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
                                          u16 *p_id)
{
        u16 id;

        id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
        if (id < tcam->max_groups) {
                __set_bit(id, tcam->used_groups);
                *p_id = id;
                return 0;
        }
        return -ENOBUFS;
}

static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
                                           u16 id)
{
        __clear_bit(id, tcam->used_groups);
}

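/* A pattern is a predefined set of key elements that a newly created region
 * is laid out to match on. The hierarchy of the objects below: a group holds
 * regions ordered by rule priority, a region holds one chunk per rule
 * priority used within it, and a chunk holds the individual TCAM entries
 * placed into the region's parman-managed slots.
 */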
struct mlxsw_sp_acl_tcam_pattern {
        const enum mlxsw_afk_element *elements;
        unsigned int elements_count;
};

struct mlxsw_sp_acl_tcam_group {
        struct mlxsw_sp_acl_tcam *tcam;
        u16 id;
        struct list_head region_list;
        unsigned int region_count;
        struct rhashtable chunk_ht;
        struct mlxsw_sp_acl_tcam_group_ops *ops;
        const struct mlxsw_sp_acl_tcam_pattern *patterns;
        unsigned int patterns_count;
};

struct mlxsw_sp_acl_tcam_region {
        struct list_head list; /* Member of a TCAM group */
        struct list_head chunk_list; /* List of chunks under this region */
        struct parman *parman;
        struct mlxsw_sp *mlxsw_sp;
        struct mlxsw_sp_acl_tcam_group *group;
        u16 id; /* ACL ID and region ID - they are the same */
        char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN];
        struct mlxsw_afk_key_info *key_info;
        struct {
                struct parman_prio parman_prio;
                struct parman_item parman_item;
                struct mlxsw_sp_acl_rule_info *rulei;
        } catchall;
};

struct mlxsw_sp_acl_tcam_chunk {
        struct list_head list; /* Member of a TCAM region */
        struct rhash_head ht_node; /* Member of a chunk HT */
        unsigned int priority; /* Priority within the region and group */
        struct parman_prio parman_prio;
        struct mlxsw_sp_acl_tcam_group *group;
        struct mlxsw_sp_acl_tcam_region *region;
        unsigned int ref_count;
};

struct mlxsw_sp_acl_tcam_entry {
        struct parman_item parman_item;
        struct mlxsw_sp_acl_tcam_chunk *chunk;
};

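/* Chunks are looked up by rule priority in a per-group rhashtable, so rules
 * that share a priority also share a chunk.
 */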
static const struct rhashtable_params mlxsw_sp_acl_tcam_chunk_ht_params = {
        .key_len = sizeof(unsigned int),
        .key_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, priority),
        .head_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, ht_node),
        .automatic_shrinking = true,
};

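/* Sync the group's current, priority-ordered list of regions to the device
 * via the PAGT register. This is re-issued whenever a region is attached to
 * or detached from the group.
 */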
static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
                                          struct mlxsw_sp_acl_tcam_group *group)
{
        struct mlxsw_sp_acl_tcam_region *region;
        char pagt_pl[MLXSW_REG_PAGT_LEN];
        int acl_index = 0;

        mlxsw_reg_pagt_pack(pagt_pl, group->id);
        list_for_each_entry(region, &group->region_list, list)
                mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++, region->id);
        mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
}

static int
mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp,
                            struct mlxsw_sp_acl_tcam *tcam,
                            struct mlxsw_sp_acl_tcam_group *group,
                            const struct mlxsw_sp_acl_tcam_pattern *patterns,
                            unsigned int patterns_count)
{
        int err;

        group->tcam = tcam;
        group->patterns = patterns;
        group->patterns_count = patterns_count;
        INIT_LIST_HEAD(&group->region_list);
        err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
        if (err)
                return err;

        err = rhashtable_init(&group->chunk_ht,
                              &mlxsw_sp_acl_tcam_chunk_ht_params);
        if (err)
                goto err_rhashtable_init;

        return 0;

err_rhashtable_init:
        mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
        return err;
}

static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp,
                                        struct mlxsw_sp_acl_tcam_group *group)
{
        struct mlxsw_sp_acl_tcam *tcam = group->tcam;

        rhashtable_destroy(&group->chunk_ht);
        mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
        WARN_ON(!list_empty(&group->region_list));
}

static int
mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
                             struct mlxsw_sp_acl_tcam_group *group,
                             struct mlxsw_sp_port *mlxsw_sp_port,
                             bool ingress)
{
        char ppbt_pl[MLXSW_REG_PPBT_LEN];

        mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
                                               MLXSW_REG_PXBT_E_EACL,
                            MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port,
                            group->id);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}

static void
mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_acl_tcam_group *group,
                               struct mlxsw_sp_port *mlxsw_sp_port,
                               bool ingress)
{
        char ppbt_pl[MLXSW_REG_PPBT_LEN];

        mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
                                               MLXSW_REG_PXBT_E_EACL,
                            MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port,
                            group->id);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}

static u16
mlxsw_sp_acl_tcam_group_id(struct mlxsw_sp_acl_tcam_group *group)
{
        return group->id;
}

static unsigned int
mlxsw_sp_acl_tcam_region_prio(struct mlxsw_sp_acl_tcam_region *region)
{
        struct mlxsw_sp_acl_tcam_chunk *chunk;

        if (list_empty(&region->chunk_list))
                return 0;
        /* The priority of a region is the priority of its first chunk */
        chunk = list_first_entry(&region->chunk_list, typeof(*chunk), list);
        return chunk->priority;
}

static unsigned int
mlxsw_sp_acl_tcam_region_max_prio(struct mlxsw_sp_acl_tcam_region *region)
{
        struct mlxsw_sp_acl_tcam_chunk *chunk;

        if (list_empty(&region->chunk_list))
                return 0;
        chunk = list_last_entry(&region->chunk_list, typeof(*chunk), list);
        return chunk->priority;
}

static void
mlxsw_sp_acl_tcam_group_list_add(struct mlxsw_sp_acl_tcam_group *group,
                                 struct mlxsw_sp_acl_tcam_region *region)
{
        struct mlxsw_sp_acl_tcam_region *region2;
        struct list_head *pos;

        /* Position the region inside the list according to priority */
        list_for_each(pos, &group->region_list) {
                region2 = list_entry(pos, typeof(*region2), list);
                if (mlxsw_sp_acl_tcam_region_prio(region2) >
                    mlxsw_sp_acl_tcam_region_prio(region))
                        break;
        }
        list_add_tail(&region->list, pos);
        group->region_count++;
}

static void
mlxsw_sp_acl_tcam_group_list_del(struct mlxsw_sp_acl_tcam_group *group,
                                 struct mlxsw_sp_acl_tcam_region *region)
{
        group->region_count--;
        list_del(&region->list);
}

static int
mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
                                      struct mlxsw_sp_acl_tcam_group *group,
                                      struct mlxsw_sp_acl_tcam_region *region)
{
        int err;

        if (group->region_count == group->tcam->max_group_size)
                return -ENOBUFS;

        mlxsw_sp_acl_tcam_group_list_add(group, region);

        err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
        if (err)
                goto err_group_update;
        region->group = group;

        return 0;

err_group_update:
        mlxsw_sp_acl_tcam_group_list_del(group, region);
        mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
        return err;
}

static void
mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
                                      struct mlxsw_sp_acl_tcam_region *region)
{
        struct mlxsw_sp_acl_tcam_group *group = region->group;

        mlxsw_sp_acl_tcam_group_list_del(group, region);
        mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
}

static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_group_region_find(struct mlxsw_sp_acl_tcam_group *group,
                                    unsigned int priority,
                                    struct mlxsw_afk_element_usage *elusage,
                                    bool *p_need_split)
{
        struct mlxsw_sp_acl_tcam_region *region, *region2;
        struct list_head *pos;
        bool issubset;

        list_for_each(pos, &group->region_list) {
                region = list_entry(pos, typeof(*region), list);

                /* First, check whether the requested priority does not belong
                 * under one of the following regions instead.
                 */
                if (pos->next != &group->region_list) { /* not last */
                        region2 = list_entry(pos->next, typeof(*region2), list);
                        if (priority >= mlxsw_sp_acl_tcam_region_prio(region2))
                                continue;
                }

                issubset = mlxsw_afk_key_info_subset(region->key_info, elusage);

                /* If requested element usage would not fit and the priority
                 * is lower than the currently inspected region we cannot
                 * use this region, so return NULL to indicate a new region
                 * has to be created.
                 */
                if (!issubset &&
                    priority < mlxsw_sp_acl_tcam_region_prio(region))
                        return NULL;

                /* If requested element usage would not fit and the priority
                 * is higher than the currently inspected region we cannot
                 * use this region. There is still some hope that the next
                 * region would fit. So let it be processed and
                 * eventually break at the check right above this.
                 */
                if (!issubset &&
                    priority > mlxsw_sp_acl_tcam_region_max_prio(region))
                        continue;

                /* Indicate if the region needs to be split in order to add
                 * the requested priority. Split is needed when requested
                 * element usage won't fit into the found region.
                 */
                *p_need_split = !issubset;
                return region;
        }
        return NULL; /* New region has to be created. */
}

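/* Pick the first pattern that covers the requested element usage, so that a
 * newly created region can later be shared by rules with related keys. If no
 * pattern matches, fall back to the exact requested usage.
 */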
static void
mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group,
                                     struct mlxsw_afk_element_usage *elusage,
                                     struct mlxsw_afk_element_usage *out)
{
        const struct mlxsw_sp_acl_tcam_pattern *pattern;
        int i;

        for (i = 0; i < group->patterns_count; i++) {
                pattern = &group->patterns[i];
                mlxsw_afk_element_usage_fill(out, pattern->elements,
                                             pattern->elements_count);
                if (mlxsw_afk_element_usage_subset(elusage, out))
                        return;
        }
        memcpy(out, elusage, sizeof(*out));
}

#define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16
#define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16

static int
mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_acl_tcam_region *region)
{
        struct mlxsw_afk_key_info *key_info = region->key_info;
        char ptar_pl[MLXSW_REG_PTAR_LEN];
        unsigned int encodings_count;
        int i;
        int err;

        mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
                            MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
                            region->id, region->tcam_region_info);
        encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
        for (i = 0; i < encodings_count; i++) {
                u16 encoding;

                encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i);
                mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding);
        }
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
        if (err)
                return err;
        mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info);
        return 0;
}

static void
mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
                              struct mlxsw_sp_acl_tcam_region *region)
{
        char ptar_pl[MLXSW_REG_PTAR_LEN];

        mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE, 0, region->id,
                            region->tcam_region_info);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}

static int
mlxsw_sp_acl_tcam_region_resize(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_tcam_region *region,
                                u16 new_size)
{
        char ptar_pl[MLXSW_REG_PTAR_LEN];

        mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE,
                            new_size, region->id, region->tcam_region_info);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}

static int
mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_tcam_region *region)
{
        char pacl_pl[MLXSW_REG_PACL_LEN];

        mlxsw_reg_pacl_pack(pacl_pl, region->id, true,
                            region->tcam_region_info);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}

static void
mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_tcam_region *region)
{
        char pacl_pl[MLXSW_REG_PACL_LEN];

        mlxsw_reg_pacl_pack(pacl_pl, region->id, false,
                            region->tcam_region_info);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}

static int
mlxsw_sp_acl_tcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
                                      struct mlxsw_sp_acl_tcam_region *region,
                                      unsigned int offset,
                                      struct mlxsw_sp_acl_rule_info *rulei)
{
        char ptce2_pl[MLXSW_REG_PTCE2_LEN];
        char *act_set;
        char *mask;
        char *key;

        mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
                             region->tcam_region_info, offset);
        key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl);
        mask = mlxsw_reg_ptce2_mask_data(ptce2_pl);
        mlxsw_afk_encode(region->key_info, &rulei->values, key, mask);

        /* Only the first action set belongs here, the rest is in KVD */
        act_set = mlxsw_afa_block_first_set(rulei->act_block);
        mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);

        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
}

static void
mlxsw_sp_acl_tcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
                                      struct mlxsw_sp_acl_tcam_region *region,
                                      unsigned int offset)
{
        char ptce2_pl[MLXSW_REG_PTCE2_LEN];

        mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
                             region->tcam_region_info, offset);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
}

static int
mlxsw_sp_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
                                            struct mlxsw_sp_acl_tcam_region *region,
                                            unsigned int offset,
                                            bool *activity)
{
        char ptce2_pl[MLXSW_REG_PTCE2_LEN];
        int err;

        mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ,
                             region->tcam_region_info, offset);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
        if (err)
                return err;
        *activity = mlxsw_reg_ptce2_a_get(ptce2_pl);
        return 0;
}

#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U)

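/* Each region is created with a catch-all entry at the lowest possible
 * precedence (parman priority ~0U). Its key and mask are left all-zero, so it
 * matches every packet, and its only action is "continue"; the intent is that
 * packets missing all user rules in the region proceed to subsequent lookups
 * instead of hitting an undefined default.
 */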
static int
mlxsw_sp_acl_tcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp,
                                      struct mlxsw_sp_acl_tcam_region *region)
{
        struct parman_prio *parman_prio = &region->catchall.parman_prio;
        struct parman_item *parman_item = &region->catchall.parman_item;
        struct mlxsw_sp_acl_rule_info *rulei;
        int err;

        parman_prio_init(region->parman, parman_prio,
                         MLXSW_SP_ACL_TCAM_CATCHALL_PRIO);
        err = parman_item_add(region->parman, parman_prio, parman_item);
        if (err)
                goto err_parman_item_add;

        rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
        if (IS_ERR(rulei)) {
                err = PTR_ERR(rulei);
                goto err_rulei_create;
        }

        err = mlxsw_sp_acl_rulei_act_continue(rulei);
        if (WARN_ON(err))
                goto err_rulei_act_continue;

        err = mlxsw_sp_acl_rulei_commit(rulei);
        if (err)
                goto err_rulei_commit;

        err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region,
                                                    parman_item->index, rulei);
        region->catchall.rulei = rulei;
        if (err)
                goto err_rule_insert;

        return 0;

err_rule_insert:
err_rulei_commit:
err_rulei_act_continue:
        mlxsw_sp_acl_rulei_destroy(rulei);
err_rulei_create:
        parman_item_remove(region->parman, parman_prio, parman_item);
err_parman_item_add:
        parman_prio_fini(parman_prio);
        return err;
}

static void
mlxsw_sp_acl_tcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp,
                                      struct mlxsw_sp_acl_tcam_region *region)
{
        struct parman_prio *parman_prio = &region->catchall.parman_prio;
        struct parman_item *parman_item = &region->catchall.parman_item;
        struct mlxsw_sp_acl_rule_info *rulei = region->catchall.rulei;

        mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region,
                                              parman_item->index);
        mlxsw_sp_acl_rulei_destroy(rulei);
        parman_item_remove(region->parman, parman_prio, parman_item);
        parman_prio_fini(parman_prio);
}

static void
mlxsw_sp_acl_tcam_region_move(struct mlxsw_sp *mlxsw_sp,
                              struct mlxsw_sp_acl_tcam_region *region,
                              u16 src_offset, u16 dst_offset, u16 size)
{
        char prcr_pl[MLXSW_REG_PRCR_LEN];

        mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE,
                            region->tcam_region_info, src_offset,
                            region->tcam_region_info, dst_offset, size);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl);
}

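/* parman callbacks: the linear region space is grown or shrunk via the PTAR
 * register and entries are relocated via the PRCR register when parman needs
 * to make room. The LSORT algorithm keeps items ordered by priority within
 * the region.
 */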
static int mlxsw_sp_acl_tcam_region_parman_resize(void *priv,
                                                  unsigned long new_count)
{
        struct mlxsw_sp_acl_tcam_region *region = priv;
        struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
        u64 max_tcam_rules;

        max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
        if (new_count > max_tcam_rules)
                return -EINVAL;
        return mlxsw_sp_acl_tcam_region_resize(mlxsw_sp, region, new_count);
}

static void mlxsw_sp_acl_tcam_region_parman_move(void *priv,
                                                 unsigned long from_index,
                                                 unsigned long to_index,
                                                 unsigned long count)
{
        struct mlxsw_sp_acl_tcam_region *region = priv;
        struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;

        mlxsw_sp_acl_tcam_region_move(mlxsw_sp, region,
                                      from_index, to_index, count);
}

static const struct parman_ops mlxsw_sp_acl_tcam_region_parman_ops = {
        .base_count     = MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
        .resize_step    = MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP,
        .resize         = mlxsw_sp_acl_tcam_region_parman_resize,
        .move           = mlxsw_sp_acl_tcam_region_parman_move,
        .algo           = PARMAN_ALGO_TYPE_LSORT,
};

static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_tcam *tcam,
                                struct mlxsw_afk_element_usage *elusage)
{
        struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
        struct mlxsw_sp_acl_tcam_region *region;
        int err;

        region = kzalloc(sizeof(*region), GFP_KERNEL);
        if (!region)
                return ERR_PTR(-ENOMEM);
        INIT_LIST_HEAD(&region->chunk_list);
        region->mlxsw_sp = mlxsw_sp;

        region->parman = parman_create(&mlxsw_sp_acl_tcam_region_parman_ops,
                                       region);
        if (!region->parman) {
                err = -ENOMEM;
                goto err_parman_create;
        }

        region->key_info = mlxsw_afk_key_info_get(afk, elusage);
        if (IS_ERR(region->key_info)) {
                err = PTR_ERR(region->key_info);
                goto err_key_info_get;
        }

        err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id);
        if (err)
                goto err_region_id_get;

        err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
        if (err)
                goto err_tcam_region_alloc;

        err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region);
        if (err)
                goto err_tcam_region_enable;

        err = mlxsw_sp_acl_tcam_region_catchall_add(mlxsw_sp, region);
        if (err)
                goto err_tcam_region_catchall_add;

        return region;

err_tcam_region_catchall_add:
        mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
err_tcam_region_enable:
        mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
err_tcam_region_alloc:
        mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
err_region_id_get:
        mlxsw_afk_key_info_put(region->key_info);
err_key_info_get:
        parman_destroy(region->parman);
err_parman_create:
        kfree(region);
        return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_tcam_region *region)
{
        mlxsw_sp_acl_tcam_region_catchall_del(mlxsw_sp, region);
        mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
        mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
        mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id);
        mlxsw_afk_key_info_put(region->key_info);
        parman_destroy(region->parman);
        kfree(region);
}

static int
mlxsw_sp_acl_tcam_chunk_assoc(struct mlxsw_sp *mlxsw_sp,
                              struct mlxsw_sp_acl_tcam_group *group,
                              unsigned int priority,
                              struct mlxsw_afk_element_usage *elusage,
                              struct mlxsw_sp_acl_tcam_chunk *chunk)
{
        struct mlxsw_sp_acl_tcam_region *region;
        bool region_created = false;
        bool need_split;
        int err;

        region = mlxsw_sp_acl_tcam_group_region_find(group, priority, elusage,
                                                     &need_split);
        if (region && need_split) {
                /* According to priority, the chunk should belong to an
                 * existing region. However, this chunk needs elements
                 * that region does not contain. We need to split the existing
                 * region into two and create a new region for this chunk
                 * in between. This is not currently supported.
                 */
                return -EOPNOTSUPP;
        }
        if (!region) {
                struct mlxsw_afk_element_usage region_elusage;

                mlxsw_sp_acl_tcam_group_use_patterns(group, elusage,
                                                     &region_elusage);
                region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, group->tcam,
                                                         &region_elusage);
                if (IS_ERR(region))
                        return PTR_ERR(region);
                region_created = true;
        }

        chunk->region = region;
        list_add_tail(&chunk->list, &region->chunk_list);

        if (!region_created)
                return 0;

        err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, group, region);
        if (err)
                goto err_group_region_attach;

        return 0;

err_group_region_attach:
        mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
        return err;
}

static void
mlxsw_sp_acl_tcam_chunk_deassoc(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_tcam_chunk *chunk)
{
        struct mlxsw_sp_acl_tcam_region *region = chunk->region;

        list_del(&chunk->list);
        if (list_empty(&region->chunk_list)) {
                mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, region);
                mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
        }
}

static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_acl_tcam_group *group,
                               unsigned int priority,
                               struct mlxsw_afk_element_usage *elusage)
{
        struct mlxsw_sp_acl_tcam_chunk *chunk;
        int err;

        if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
                return ERR_PTR(-EINVAL);

        chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
        if (!chunk)
                return ERR_PTR(-ENOMEM);
        chunk->priority = priority;
        chunk->group = group;
        chunk->ref_count = 1;

        err = mlxsw_sp_acl_tcam_chunk_assoc(mlxsw_sp, group, priority,
                                            elusage, chunk);
        if (err)
                goto err_chunk_assoc;

        parman_prio_init(chunk->region->parman, &chunk->parman_prio, priority);

        err = rhashtable_insert_fast(&group->chunk_ht, &chunk->ht_node,
                                     mlxsw_sp_acl_tcam_chunk_ht_params);
        if (err)
                goto err_rhashtable_insert;

        return chunk;

err_rhashtable_insert:
        parman_prio_fini(&chunk->parman_prio);
        mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
err_chunk_assoc:
        kfree(chunk);
        return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_tcam_chunk *chunk)
{
        struct mlxsw_sp_acl_tcam_group *group = chunk->group;

        rhashtable_remove_fast(&group->chunk_ht, &chunk->ht_node,
                               mlxsw_sp_acl_tcam_chunk_ht_params);
        parman_prio_fini(&chunk->parman_prio);
        mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
        kfree(chunk);
}

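/* Chunks are reference counted: a rule added at an already used priority
 * reuses the existing chunk, and the chunk (and possibly its region) is
 * destroyed only when the last rule referencing it is removed.
 */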
static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_get(struct mlxsw_sp *mlxsw_sp,
                            struct mlxsw_sp_acl_tcam_group *group,
                            unsigned int priority,
                            struct mlxsw_afk_element_usage *elusage)
{
        struct mlxsw_sp_acl_tcam_chunk *chunk;

        chunk = rhashtable_lookup_fast(&group->chunk_ht, &priority,
                                       mlxsw_sp_acl_tcam_chunk_ht_params);
        if (chunk) {
                if (WARN_ON(!mlxsw_afk_key_info_subset(chunk->region->key_info,
                                                       elusage)))
                        return ERR_PTR(-EINVAL);
                chunk->ref_count++;
                return chunk;
        }
        return mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, group,
                                              priority, elusage);
}

static void mlxsw_sp_acl_tcam_chunk_put(struct mlxsw_sp *mlxsw_sp,
                                        struct mlxsw_sp_acl_tcam_chunk *chunk)
{
        if (--chunk->ref_count)
                return;
        mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, chunk);
}

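/* Adding an entry means taking a chunk for the rule's priority, reserving a
 * slot in the region through parman and then programming the key, mask and
 * first action set with the PTCE2 register.
 */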
static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
                                       struct mlxsw_sp_acl_tcam_group *group,
                                       struct mlxsw_sp_acl_tcam_entry *entry,
                                       struct mlxsw_sp_acl_rule_info *rulei)
{
        struct mlxsw_sp_acl_tcam_chunk *chunk;
        struct mlxsw_sp_acl_tcam_region *region;
        int err;

        chunk = mlxsw_sp_acl_tcam_chunk_get(mlxsw_sp, group, rulei->priority,
                                            &rulei->values.elusage);
        if (IS_ERR(chunk))
                return PTR_ERR(chunk);

        region = chunk->region;
        err = parman_item_add(region->parman, &chunk->parman_prio,
                              &entry->parman_item);
        if (err)
                goto err_parman_item_add;

        err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region,
                                                    entry->parman_item.index,
                                                    rulei);
        if (err)
                goto err_rule_insert;
        entry->chunk = chunk;

        return 0;

err_rule_insert:
        parman_item_remove(region->parman, &chunk->parman_prio,
                           &entry->parman_item);
err_parman_item_add:
        mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
        return err;
}

static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
                                        struct mlxsw_sp_acl_tcam_entry *entry)
{
        struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
        struct mlxsw_sp_acl_tcam_region *region = chunk->region;

        mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region,
                                              entry->parman_item.index);
        parman_item_remove(region->parman, &chunk->parman_prio,
                           &entry->parman_item);
        mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
}

static int
mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
                                     struct mlxsw_sp_acl_tcam_entry *entry,
                                     bool *activity)
{
        struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
        struct mlxsw_sp_acl_tcam_region *region = chunk->region;

        return mlxsw_sp_acl_tcam_region_entry_activity_get(mlxsw_sp, region,
                                                           entry->parman_item.index,
                                                           activity);
}

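/* Default patterns handed to flower groups: one element set for L2/IPv4/L4
 * matching and one for IPv6. A new region is created with whichever pattern
 * covers the rule that triggered its creation.
 */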
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
        MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
        MLXSW_AFK_ELEMENT_DMAC,
        MLXSW_AFK_ELEMENT_SMAC,
        MLXSW_AFK_ELEMENT_ETHERTYPE,
        MLXSW_AFK_ELEMENT_IP_PROTO,
        MLXSW_AFK_ELEMENT_SRC_IP4,
        MLXSW_AFK_ELEMENT_DST_IP4,
        MLXSW_AFK_ELEMENT_DST_L4_PORT,
        MLXSW_AFK_ELEMENT_SRC_L4_PORT,
        MLXSW_AFK_ELEMENT_VID,
        MLXSW_AFK_ELEMENT_PCP,
        MLXSW_AFK_ELEMENT_TCP_FLAGS,
        MLXSW_AFK_ELEMENT_IP_TTL_,
        MLXSW_AFK_ELEMENT_IP_ECN,
        MLXSW_AFK_ELEMENT_IP_DSCP,
};

static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
        MLXSW_AFK_ELEMENT_ETHERTYPE,
        MLXSW_AFK_ELEMENT_IP_PROTO,
        MLXSW_AFK_ELEMENT_SRC_IP6_HI,
        MLXSW_AFK_ELEMENT_SRC_IP6_LO,
        MLXSW_AFK_ELEMENT_DST_IP6_HI,
        MLXSW_AFK_ELEMENT_DST_IP6_LO,
        MLXSW_AFK_ELEMENT_DST_L4_PORT,
        MLXSW_AFK_ELEMENT_SRC_L4_PORT,
};

static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
        {
                .elements = mlxsw_sp_acl_tcam_pattern_ipv4,
                .elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4),
        },
        {
                .elements = mlxsw_sp_acl_tcam_pattern_ipv6,
                .elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6),
        },
};

#define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \
        ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)

struct mlxsw_sp_acl_tcam_flower_ruleset {
        struct mlxsw_sp_acl_tcam_group group;
};

struct mlxsw_sp_acl_tcam_flower_rule {
        struct mlxsw_sp_acl_tcam_entry entry;
};

static int
mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
                                     void *priv, void *ruleset_priv)
{
        struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
        struct mlxsw_sp_acl_tcam *tcam = priv;

        return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group,
                                           mlxsw_sp_acl_tcam_patterns,
                                           MLXSW_SP_ACL_TCAM_PATTERNS_COUNT);
}

static void
mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
                                     void *ruleset_priv)
{
        struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

        mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
}

static int
mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
                                      void *ruleset_priv,
                                      struct mlxsw_sp_port *mlxsw_sp_port,
                                      bool ingress)
{
        struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

        return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group,
                                            mlxsw_sp_port, ingress);
}

static void
mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
                                        void *ruleset_priv,
                                        struct mlxsw_sp_port *mlxsw_sp_port,
                                        bool ingress)
{
        struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

        mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group,
                                       mlxsw_sp_port, ingress);
}

static u16
mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
{
        struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

        return mlxsw_sp_acl_tcam_group_id(&ruleset->group);
}

static int
mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
                                  void *ruleset_priv, void *rule_priv,
                                  struct mlxsw_sp_acl_rule_info *rulei)
{
        struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
        struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

        return mlxsw_sp_acl_tcam_entry_add(mlxsw_sp, &ruleset->group,
                                           &rule->entry, rulei);
}

static void
mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
{
        struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

        mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry);
}

static int
mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
                                           void *rule_priv, bool *activity)
{
        struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

        return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, &rule->entry,
                                                    activity);
}

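/* Glue between the generic TCAM code and the flower-based ACL profile: each
 * flower ruleset is backed by one TCAM group and each flower rule by one
 * TCAM entry.
 */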
static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
        .ruleset_priv_size      = sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
        .ruleset_add            = mlxsw_sp_acl_tcam_flower_ruleset_add,
        .ruleset_del            = mlxsw_sp_acl_tcam_flower_ruleset_del,
        .ruleset_bind           = mlxsw_sp_acl_tcam_flower_ruleset_bind,
        .ruleset_unbind         = mlxsw_sp_acl_tcam_flower_ruleset_unbind,
        .ruleset_group_id       = mlxsw_sp_acl_tcam_flower_ruleset_group_id,
        .rule_priv_size         = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
        .rule_add               = mlxsw_sp_acl_tcam_flower_rule_add,
        .rule_del               = mlxsw_sp_acl_tcam_flower_rule_del,
        .rule_activity_get      = mlxsw_sp_acl_tcam_flower_rule_activity_get,
};

static const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops_arr[] = {
        [MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
};

static const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
                              enum mlxsw_sp_acl_profile profile)
{
        const struct mlxsw_sp_acl_profile_ops *ops;

        if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr)))
                return NULL;
        ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile];
        if (WARN_ON(!ops))
                return NULL;
        return ops;
}

const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops = {
        .priv_size              = sizeof(struct mlxsw_sp_acl_tcam),
        .init                   = mlxsw_sp_acl_tcam_init,
        .fini                   = mlxsw_sp_acl_tcam_fini,
        .profile_ops            = mlxsw_sp_acl_tcam_profile_ops,
};