linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <trace/events/mlxsw.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "spectrum_acl_tcam.h"
#include "core_acl_flex_keys.h"

size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

        return ops->priv_size;
}

#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT 5000 /* ms */
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN 3000 /* ms */
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS 100 /* number of entries */
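
/* The rehash delayed work is re-armed every vregion_rehash_intrvl
 * milliseconds. A single work pass is budgeted with
 * MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS credits, roughly one credit
 * per migrated entry; once the budget is exhausted, the work reschedules
 * itself with zero delay and resumes from the position saved in the
 * rehash context.
 */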

int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
                           struct mlxsw_sp_acl_tcam *tcam)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        u64 max_tcam_regions;
        u64 max_regions;
        u64 max_groups;
        size_t alloc_size;
        int err;

        mutex_init(&tcam->lock);
        tcam->vregion_rehash_intrvl =
                        MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT;
        INIT_LIST_HEAD(&tcam->vregion_list);

        max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
                                              ACL_MAX_TCAM_REGIONS);
        max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);

        /* Use 1:1 mapping between ACL region and TCAM region */
        if (max_tcam_regions < max_regions)
                max_regions = max_tcam_regions;

        alloc_size = sizeof(tcam->used_regions[0]) * BITS_TO_LONGS(max_regions);
        tcam->used_regions = kzalloc(alloc_size, GFP_KERNEL);
        if (!tcam->used_regions)
                return -ENOMEM;
        tcam->max_regions = max_regions;

        max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
        alloc_size = sizeof(tcam->used_groups[0]) * BITS_TO_LONGS(max_groups);
        tcam->used_groups = kzalloc(alloc_size, GFP_KERNEL);
        if (!tcam->used_groups) {
                err = -ENOMEM;
                goto err_alloc_used_groups;
        }
        tcam->max_groups = max_groups;
        tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
                                                 ACL_MAX_GROUP_SIZE);

        err = ops->init(mlxsw_sp, tcam->priv, tcam);
        if (err)
                goto err_tcam_init;

        return 0;

err_tcam_init:
        kfree(tcam->used_groups);
err_alloc_used_groups:
        kfree(tcam->used_regions);
        return err;
}

void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
                            struct mlxsw_sp_acl_tcam *tcam)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

        mutex_destroy(&tcam->lock);
        ops->fini(mlxsw_sp, tcam->priv);
        kfree(tcam->used_groups);
        kfree(tcam->used_regions);
}

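/* Map a TC priority to a hardware priority. TC treats lower numbers as
 * higher priority, while the hardware treats higher numbers as higher
 * priority, so the value is mirrored around the top of the usable range.
 * For example, with an illustrative cap_kvd_size of 0x8000, max_priority
 * is 0x7fff and a TC priority of 1 maps to HW priority 0x7ffe.
 */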
int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
                                   struct mlxsw_sp_acl_rule_info *rulei,
                                   u32 *priority, bool fillup_priority)
{
        u64 max_priority;

        if (!fillup_priority) {
                *priority = 0;
                return 0;
        }

        if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
                return -EIO;

        /* Priority range is 1..cap_kvd_size-1. */
        max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE) - 1;
        if (rulei->priority >= max_priority)
                return -EINVAL;

        /* Unlike in TC, in HW, higher number means higher priority. */
        *priority = max_priority - rulei->priority;
        return 0;
}

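/* Region and group IDs are handed out first-fit from plain bitmaps sized
 * after the ACL_MAX_REGIONS and ACL_MAX_GROUPS resources queried at init
 * time.
 */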
static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
                                           u16 *p_id)
{
        u16 id;

        id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
        if (id < tcam->max_regions) {
                __set_bit(id, tcam->used_regions);
                *p_id = id;
                return 0;
        }
        return -ENOBUFS;
}

static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
                                            u16 id)
{
        __clear_bit(id, tcam->used_regions);
}

static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
                                          u16 *p_id)
{
        u16 id;

        id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
        if (id < tcam->max_groups) {
                __set_bit(id, tcam->used_groups);
                *p_id = id;
                return 0;
        }
        return -ENOBUFS;
}

static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
                                           u16 id)
{
        __clear_bit(id, tcam->used_groups);
}

struct mlxsw_sp_acl_tcam_pattern {
        const enum mlxsw_afk_element *elements;
        unsigned int elements_count;
};

struct mlxsw_sp_acl_tcam_group {
        struct mlxsw_sp_acl_tcam *tcam;
        u16 id;
        struct mutex lock; /* guards region list updates */
        struct list_head region_list;
        unsigned int region_count;
};

struct mlxsw_sp_acl_tcam_vgroup {
        struct mlxsw_sp_acl_tcam_group group;
        struct list_head vregion_list;
        struct rhashtable vchunk_ht;
        const struct mlxsw_sp_acl_tcam_pattern *patterns;
        unsigned int patterns_count;
        bool tmplt_elusage_set;
        struct mlxsw_afk_element_usage tmplt_elusage;
        bool vregion_rehash_enabled;
        unsigned int *p_min_prio;
        unsigned int *p_max_prio;
};

struct mlxsw_sp_acl_tcam_rehash_ctx {
        void *hints_priv;
        bool this_is_rollback;
        struct mlxsw_sp_acl_tcam_vchunk *current_vchunk; /* vchunk currently
                                                          * being migrated.
                                                          */
        struct mlxsw_sp_acl_tcam_ventry *start_ventry; /* ventry to start
                                                        * migration from in
                                                        * the vchunk currently
                                                        * being migrated.
                                                        */
        struct mlxsw_sp_acl_tcam_ventry *stop_ventry; /* ventry at which to
                                                       * stop migration in
                                                       * the vchunk currently
                                                       * being migrated.
                                                       */
};

struct mlxsw_sp_acl_tcam_vregion {
        struct mutex lock; /* Protects consistency of region, region2 pointers
                            * and vchunk_list.
                            */
        struct mlxsw_sp_acl_tcam_region *region;
        struct mlxsw_sp_acl_tcam_region *region2; /* Used during migration */
        struct list_head list; /* Member of a TCAM group */
        struct list_head tlist; /* Member of a TCAM */
        struct list_head vchunk_list; /* List of vchunks under this vregion */
        struct mlxsw_afk_key_info *key_info;
        struct mlxsw_sp_acl_tcam *tcam;
        struct mlxsw_sp_acl_tcam_vgroup *vgroup;
        struct {
                struct delayed_work dw;
                struct mlxsw_sp_acl_tcam_rehash_ctx ctx;
        } rehash;
        struct mlxsw_sp *mlxsw_sp;
        unsigned int ref_count;
};

struct mlxsw_sp_acl_tcam_vchunk;

struct mlxsw_sp_acl_tcam_chunk {
        struct mlxsw_sp_acl_tcam_vchunk *vchunk;
        struct mlxsw_sp_acl_tcam_region *region;
        unsigned long priv[];
        /* priv has to be always the last item */
};

struct mlxsw_sp_acl_tcam_vchunk {
        struct mlxsw_sp_acl_tcam_chunk *chunk;
        struct mlxsw_sp_acl_tcam_chunk *chunk2; /* Used during migration */
        struct list_head list; /* Member of a TCAM vregion */
        struct rhash_head ht_node; /* Member of a chunk HT */
        struct list_head ventry_list;
        unsigned int priority; /* Priority within the vregion and group */
        struct mlxsw_sp_acl_tcam_vgroup *vgroup;
        struct mlxsw_sp_acl_tcam_vregion *vregion;
        unsigned int ref_count;
};

struct mlxsw_sp_acl_tcam_entry {
        struct mlxsw_sp_acl_tcam_ventry *ventry;
        struct mlxsw_sp_acl_tcam_chunk *chunk;
        unsigned long priv[];
        /* priv has to be always the last item */
};

struct mlxsw_sp_acl_tcam_ventry {
        struct mlxsw_sp_acl_tcam_entry *entry;
        struct list_head list; /* Member of a TCAM vchunk */
        struct mlxsw_sp_acl_tcam_vchunk *vchunk;
        struct mlxsw_sp_acl_rule_info *rulei;
};

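/* vchunks are hashed by their priority, which doubles as the lookup key;
 * mlxsw_sp_acl_tcam_vchunk_get() below uses it to find an existing vchunk
 * before deciding to create a new one.
 */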
static const struct rhashtable_params mlxsw_sp_acl_tcam_vchunk_ht_params = {
        .key_len = sizeof(unsigned int),
        .key_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, priority),
        .head_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, ht_node),
        .automatic_shrinking = true,
};

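/* Rewrite the ordered list of region IDs bound to the group in hardware.
 * Regions sharing a vregion (which happens transiently during rehash
 * migration) are flagged as "multi" so the device treats them as parts
 * of the same lookup.
 */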
static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
                                          struct mlxsw_sp_acl_tcam_group *group)
{
        struct mlxsw_sp_acl_tcam_region *region;
        char pagt_pl[MLXSW_REG_PAGT_LEN];
        int acl_index = 0;

        mlxsw_reg_pagt_pack(pagt_pl, group->id);
        list_for_each_entry(region, &group->region_list, list) {
                bool multi = false;

                /* Check if the next entry in the list has the same vregion. */
                if (region->list.next != &group->region_list &&
                    list_next_entry(region, list)->vregion == region->vregion)
                        multi = true;
                mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++,
                                           region->id, multi);
        }
        mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
}

static int
mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp_acl_tcam *tcam,
                            struct mlxsw_sp_acl_tcam_group *group)
{
        int err;

        group->tcam = tcam;
        INIT_LIST_HEAD(&group->region_list);

        err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
        if (err)
                return err;

        mutex_init(&group->lock);

        return 0;
}

static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp_acl_tcam_group *group)
{
        struct mlxsw_sp_acl_tcam *tcam = group->tcam;

        mutex_destroy(&group->lock);
        mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
        WARN_ON(!list_empty(&group->region_list));
}

static int
mlxsw_sp_acl_tcam_vgroup_add(struct mlxsw_sp *mlxsw_sp,
                             struct mlxsw_sp_acl_tcam *tcam,
                             struct mlxsw_sp_acl_tcam_vgroup *vgroup,
                             const struct mlxsw_sp_acl_tcam_pattern *patterns,
                             unsigned int patterns_count,
                             struct mlxsw_afk_element_usage *tmplt_elusage,
                             bool vregion_rehash_enabled,
                             unsigned int *p_min_prio,
                             unsigned int *p_max_prio)
{
        int err;

        vgroup->patterns = patterns;
        vgroup->patterns_count = patterns_count;
        vgroup->vregion_rehash_enabled = vregion_rehash_enabled;
        vgroup->p_min_prio = p_min_prio;
        vgroup->p_max_prio = p_max_prio;

        if (tmplt_elusage) {
                vgroup->tmplt_elusage_set = true;
                memcpy(&vgroup->tmplt_elusage, tmplt_elusage,
                       sizeof(vgroup->tmplt_elusage));
        }
        INIT_LIST_HEAD(&vgroup->vregion_list);

        err = mlxsw_sp_acl_tcam_group_add(tcam, &vgroup->group);
        if (err)
                return err;

        err = rhashtable_init(&vgroup->vchunk_ht,
                              &mlxsw_sp_acl_tcam_vchunk_ht_params);
        if (err)
                goto err_rhashtable_init;

        return 0;

err_rhashtable_init:
        mlxsw_sp_acl_tcam_group_del(&vgroup->group);
        return err;
}

static void
mlxsw_sp_acl_tcam_vgroup_del(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
{
        rhashtable_destroy(&vgroup->vchunk_ht);
        mlxsw_sp_acl_tcam_group_del(&vgroup->group);
        WARN_ON(!list_empty(&vgroup->vregion_list));
}

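/* Bind the group to a port via the PPBT register; the direction selects
 * between the ingress (IACL) and egress (EACL) binding points.
 */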
static int
mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
                             struct mlxsw_sp_acl_tcam_group *group,
                             struct mlxsw_sp_port *mlxsw_sp_port,
                             bool ingress)
{
        char ppbt_pl[MLXSW_REG_PPBT_LEN];

        mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
                                               MLXSW_REG_PXBT_E_EACL,
                            MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port,
                            group->id);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}

static void
mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_acl_tcam_group *group,
                               struct mlxsw_sp_port *mlxsw_sp_port,
                               bool ingress)
{
        char ppbt_pl[MLXSW_REG_PPBT_LEN];

        mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
                                               MLXSW_REG_PXBT_E_EACL,
                            MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port,
                            group->id);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}

static u16
mlxsw_sp_acl_tcam_group_id(struct mlxsw_sp_acl_tcam_group *group)
{
        return group->id;
}

static unsigned int
mlxsw_sp_acl_tcam_vregion_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
        struct mlxsw_sp_acl_tcam_vchunk *vchunk;

        if (list_empty(&vregion->vchunk_list))
                return 0;
        /* The priority of a vregion is the priority of its first vchunk. */
        vchunk = list_first_entry(&vregion->vchunk_list,
                                  typeof(*vchunk), list);
        return vchunk->priority;
}

static unsigned int
mlxsw_sp_acl_tcam_vregion_max_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
        struct mlxsw_sp_acl_tcam_vchunk *vchunk;

        if (list_empty(&vregion->vchunk_list))
                return 0;
        vchunk = list_last_entry(&vregion->vchunk_list,
                                 typeof(*vchunk), list);
        return vchunk->priority;
}

static void
mlxsw_sp_acl_tcam_vgroup_prio_update(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
{
        struct mlxsw_sp_acl_tcam_vregion *vregion;

        if (list_empty(&vgroup->vregion_list))
                return;
        vregion = list_first_entry(&vgroup->vregion_list,
                                   typeof(*vregion), list);
        *vgroup->p_min_prio = mlxsw_sp_acl_tcam_vregion_prio(vregion);
        vregion = list_last_entry(&vgroup->vregion_list,
                                  typeof(*vregion), list);
        *vgroup->p_max_prio = mlxsw_sp_acl_tcam_vregion_max_prio(vregion);
}

static int
mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
                                      struct mlxsw_sp_acl_tcam_group *group,
                                      struct mlxsw_sp_acl_tcam_region *region,
                                      unsigned int priority,
                                      struct mlxsw_sp_acl_tcam_region *next_region)
{
        struct mlxsw_sp_acl_tcam_region *region2;
        struct list_head *pos;
        int err;

        mutex_lock(&group->lock);
        if (group->region_count == group->tcam->max_group_size) {
                err = -ENOBUFS;
                goto err_region_count_check;
        }

        if (next_region) {
                /* If the next region is defined, place the new one
                 * before it. The next one is a sibling.
                 */
                pos = &next_region->list;
        } else {
                /* Position the region inside the list according to priority */
                list_for_each(pos, &group->region_list) {
                        region2 = list_entry(pos, typeof(*region2), list);
                        if (mlxsw_sp_acl_tcam_vregion_prio(region2->vregion) >
                            priority)
                                break;
                }
        }
        list_add_tail(&region->list, pos);
        region->group = group;

        err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
        if (err)
                goto err_group_update;

        group->region_count++;
        mutex_unlock(&group->lock);
        return 0;

err_group_update:
        list_del(&region->list);
err_region_count_check:
        mutex_unlock(&group->lock);
        return err;
}

static void
mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
                                      struct mlxsw_sp_acl_tcam_region *region)
{
        struct mlxsw_sp_acl_tcam_group *group = region->group;

        mutex_lock(&group->lock);
        list_del(&region->list);
        group->region_count--;
        mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
        mutex_unlock(&group->lock);
}

static int
mlxsw_sp_acl_tcam_vgroup_vregion_attach(struct mlxsw_sp *mlxsw_sp,
                                        struct mlxsw_sp_acl_tcam_vgroup *vgroup,
                                        struct mlxsw_sp_acl_tcam_vregion *vregion,
                                        unsigned int priority)
{
        struct mlxsw_sp_acl_tcam_vregion *vregion2;
        struct list_head *pos;
        int err;

        /* Position the vregion inside the list according to priority */
        list_for_each(pos, &vgroup->vregion_list) {
                vregion2 = list_entry(pos, typeof(*vregion2), list);
                if (mlxsw_sp_acl_tcam_vregion_prio(vregion2) > priority)
                        break;
        }
        list_add_tail(&vregion->list, pos);

        err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, &vgroup->group,
                                                    vregion->region,
                                                    priority, NULL);
        if (err)
                goto err_region_attach;

        return 0;

err_region_attach:
        list_del(&vregion->list);
        return err;
}

static void
mlxsw_sp_acl_tcam_vgroup_vregion_detach(struct mlxsw_sp *mlxsw_sp,
                                        struct mlxsw_sp_acl_tcam_vregion *vregion)
{
        list_del(&vregion->list);
        if (vregion->region2)
                mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp,
                                                      vregion->region2);
        mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, vregion->region);
}

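/* Look up a vregion that can take a vchunk with the given priority and
 * element usage. *p_need_split is set when the priority fits into an
 * existing vregion but the key elements do not; NULL means a new vregion
 * has to be created.
 */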
static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vgroup_vregion_find(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
                                      unsigned int priority,
                                      struct mlxsw_afk_element_usage *elusage,
                                      bool *p_need_split)
{
        struct mlxsw_sp_acl_tcam_vregion *vregion, *vregion2;
        struct list_head *pos;
        bool issubset;

        list_for_each(pos, &vgroup->vregion_list) {
                vregion = list_entry(pos, typeof(*vregion), list);

                /* First, check whether the requested priority actually
                 * belongs under one of the following vregions.
                 */
                if (pos->next != &vgroup->vregion_list) { /* not last */
                        vregion2 = list_entry(pos->next, typeof(*vregion2),
                                              list);
                        if (priority >=
                            mlxsw_sp_acl_tcam_vregion_prio(vregion2))
                                continue;
                }

                issubset = mlxsw_afk_key_info_subset(vregion->key_info,
                                                     elusage);

                /* If the requested element usage would not fit and the
                 * priority is lower than that of the currently inspected
                 * vregion, we cannot use this vregion, so return NULL to
                 * indicate that a new vregion has to be created.
                 */
                if (!issubset &&
                    priority < mlxsw_sp_acl_tcam_vregion_prio(vregion))
                        return NULL;

                /* If the requested element usage would not fit and the
                 * priority is higher than that of the currently inspected
                 * vregion, we cannot use this vregion either. There is
                 * still some hope that the next vregion will fit, so let
                 * it be processed and eventually break at the check above.
                 */
                if (!issubset &&
                    priority > mlxsw_sp_acl_tcam_vregion_max_prio(vregion))
                        continue;

                /* Indicate if the vregion needs to be split in order to add
                 * the requested priority. Split is needed when the requested
                 * element usage won't fit into the found vregion.
                 */
                *p_need_split = !issubset;
                return vregion;
        }
        return NULL; /* A new vregion has to be created. */
}

static void
mlxsw_sp_acl_tcam_vgroup_use_patterns(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
                                      struct mlxsw_afk_element_usage *elusage,
                                      struct mlxsw_afk_element_usage *out)
{
        const struct mlxsw_sp_acl_tcam_pattern *pattern;
        int i;

        /* If the template is set, we do not have to look up a pattern
         * and can simply use the template.
         */
        if (vgroup->tmplt_elusage_set) {
                memcpy(out, &vgroup->tmplt_elusage, sizeof(*out));
                WARN_ON(!mlxsw_afk_element_usage_subset(elusage, out));
                return;
        }

        for (i = 0; i < vgroup->patterns_count; i++) {
                pattern = &vgroup->patterns[i];
                mlxsw_afk_element_usage_fill(out, pattern->elements,
                                             pattern->elements_count);
                if (mlxsw_afk_element_usage_subset(elusage, out))
                        return;
        }
        memcpy(out, elusage, sizeof(*out));
}

static int
mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_acl_tcam_region *region)
{
        struct mlxsw_afk_key_info *key_info = region->key_info;
        char ptar_pl[MLXSW_REG_PTAR_LEN];
        unsigned int encodings_count;
        int i;
        int err;

        mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
                            region->key_type,
                            MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
                            region->id, region->tcam_region_info);
        encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
        for (i = 0; i < encodings_count; i++) {
                u16 encoding;

                encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i);
                mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding);
        }
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
        if (err)
                return err;
        mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info);
        return 0;
}

static void
mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
                              struct mlxsw_sp_acl_tcam_region *region)
{
        char ptar_pl[MLXSW_REG_PTAR_LEN];

        mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE,
                            region->key_type, 0, region->id,
                            region->tcam_region_info);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}

static int
mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_tcam_region *region)
{
        char pacl_pl[MLXSW_REG_PACL_LEN];

        mlxsw_reg_pacl_pack(pacl_pl, region->id, true,
                            region->tcam_region_info);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}

static void
mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_tcam_region *region)
{
        char pacl_pl[MLXSW_REG_PACL_LEN];

        mlxsw_reg_pacl_pack(pacl_pl, region->id, false,
                            region->tcam_region_info);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}

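/* A region is brought up in several steps: reserve an ID, let the
 * flavor-specific ops associate it, allocate it in hardware (PTAR),
 * enable it (PACL) and run the flavor-specific init. The error path
 * unwinds these steps in reverse order.
 */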
static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_tcam *tcam,
                                struct mlxsw_sp_acl_tcam_vregion *vregion,
                                void *hints_priv)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        struct mlxsw_sp_acl_tcam_region *region;
        int err;

        region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL);
        if (!region)
                return ERR_PTR(-ENOMEM);
        region->mlxsw_sp = mlxsw_sp;
        region->vregion = vregion;
        region->key_info = vregion->key_info;

        err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id);
        if (err)
                goto err_region_id_get;

        err = ops->region_associate(mlxsw_sp, region);
        if (err)
                goto err_tcam_region_associate;

        region->key_type = ops->key_type;
        err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
        if (err)
                goto err_tcam_region_alloc;

        err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region);
        if (err)
                goto err_tcam_region_enable;

        err = ops->region_init(mlxsw_sp, region->priv, tcam->priv,
                               region, hints_priv);
        if (err)
                goto err_tcam_region_init;

        return region;

err_tcam_region_init:
        mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
err_tcam_region_enable:
        mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
err_tcam_region_alloc:
err_tcam_region_associate:
        mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
err_region_id_get:
        kfree(region);
        return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_tcam_region *region)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

        ops->region_fini(mlxsw_sp, region->priv);
        mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
        mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
        mlxsw_sp_acl_tcam_region_id_put(region->group->tcam,
                                        region->id);
        kfree(region);
}

static void
mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
        unsigned long interval = vregion->tcam->vregion_rehash_intrvl;

        if (!interval)
                return;
        mlxsw_core_schedule_dw(&vregion->rehash.dw,
                               msecs_to_jiffies(interval));
}

static void
mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_tcam_vregion *vregion,
                                 int *credits);

static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
{
        struct mlxsw_sp_acl_tcam_vregion *vregion =
                container_of(work, struct mlxsw_sp_acl_tcam_vregion,
                             rehash.dw.work);
        int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;

        mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits);
        if (credits < 0)
                /* Rehash ran out of credits, so it was interrupted.
                 * Schedule the work as soon as possible to continue.
                 */
                mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
        else
                mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
}

static void
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
        struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;

        /* If a rule was added to or deleted from a vchunk which is currently
         * under rehash migration, we have to reset the ventry pointers
         * to make sure all rules are properly migrated.
         */
        if (vregion->rehash.ctx.current_vchunk == vchunk) {
                vregion->rehash.ctx.start_ventry = NULL;
                vregion->rehash.ctx.stop_ventry = NULL;
        }
}

static void
mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
        /* If a vchunk was added to or deleted from a vregion, we have to
         * reset the current vchunk pointer to make sure all vchunks
         * are properly migrated.
         */
        vregion->rehash.ctx.current_vchunk = NULL;
}

static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_tcam_vgroup *vgroup,
                                 unsigned int priority,
                                 struct mlxsw_afk_element_usage *elusage)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
        struct mlxsw_sp_acl_tcam *tcam = vgroup->group.tcam;
        struct mlxsw_sp_acl_tcam_vregion *vregion;
        int err;

        vregion = kzalloc(sizeof(*vregion), GFP_KERNEL);
        if (!vregion)
                return ERR_PTR(-ENOMEM);
        INIT_LIST_HEAD(&vregion->vchunk_list);
        mutex_init(&vregion->lock);
        vregion->tcam = tcam;
        vregion->mlxsw_sp = mlxsw_sp;
        vregion->vgroup = vgroup;
        vregion->ref_count = 1;

        vregion->key_info = mlxsw_afk_key_info_get(afk, elusage);
        if (IS_ERR(vregion->key_info)) {
                err = PTR_ERR(vregion->key_info);
                goto err_key_info_get;
        }

        vregion->region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, tcam,
                                                          vregion, NULL);
        if (IS_ERR(vregion->region)) {
                err = PTR_ERR(vregion->region);
                goto err_region_create;
        }

        err = mlxsw_sp_acl_tcam_vgroup_vregion_attach(mlxsw_sp, vgroup, vregion,
                                                      priority);
        if (err)
                goto err_vgroup_vregion_attach;

        if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
                /* Create the delayed work for vregion periodic rehash */
                INIT_DELAYED_WORK(&vregion->rehash.dw,
                                  mlxsw_sp_acl_tcam_vregion_rehash_work);
                mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
                mutex_lock(&tcam->lock);
                list_add_tail(&vregion->tlist, &tcam->vregion_list);
                mutex_unlock(&tcam->lock);
        }

        return vregion;

err_vgroup_vregion_attach:
        mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
err_region_create:
        mlxsw_afk_key_info_put(vregion->key_info);
err_key_info_get:
        kfree(vregion);
        return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
                                  struct mlxsw_sp_acl_tcam_vregion *vregion)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        struct mlxsw_sp_acl_tcam_vgroup *vgroup = vregion->vgroup;
        struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;

        if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
                mutex_lock(&tcam->lock);
                list_del(&vregion->tlist);
                mutex_unlock(&tcam->lock);
                cancel_delayed_work_sync(&vregion->rehash.dw);
        }
        mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
        if (vregion->region2)
                mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region2);
        mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
        mlxsw_afk_key_info_put(vregion->key_info);
        mutex_destroy(&vregion->lock);
        kfree(vregion);
}

u32 mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp,
                                                struct mlxsw_sp_acl_tcam *tcam)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        u32 vregion_rehash_intrvl;

        if (WARN_ON(!ops->region_rehash_hints_get))
                return 0;
        vregion_rehash_intrvl = tcam->vregion_rehash_intrvl;
        return vregion_rehash_intrvl;
}

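/* Setting the interval to zero disables periodic rehash altogether; any
 * other value below the minimum is rejected. On change, the scheduled
 * works are either kicked immediately or cancelled so the new value
 * takes effect right away.
 */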
int mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp,
                                                struct mlxsw_sp_acl_tcam *tcam,
                                                u32 val)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        struct mlxsw_sp_acl_tcam_vregion *vregion;

        if (val < MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN && val)
                return -EINVAL;
        if (WARN_ON(!ops->region_rehash_hints_get))
                return -EOPNOTSUPP;
        tcam->vregion_rehash_intrvl = val;
        mutex_lock(&tcam->lock);
        list_for_each_entry(vregion, &tcam->vregion_list, tlist) {
                if (val)
                        mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
                else
                        cancel_delayed_work_sync(&vregion->rehash.dw);
        }
        mutex_unlock(&tcam->lock);
        return 0;
}

static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vregion_get(struct mlxsw_sp *mlxsw_sp,
                              struct mlxsw_sp_acl_tcam_vgroup *vgroup,
                              unsigned int priority,
                              struct mlxsw_afk_element_usage *elusage)
{
        struct mlxsw_afk_element_usage vregion_elusage;
        struct mlxsw_sp_acl_tcam_vregion *vregion;
        bool need_split;

        vregion = mlxsw_sp_acl_tcam_vgroup_vregion_find(vgroup, priority,
                                                        elusage, &need_split);
        if (vregion) {
                if (need_split) {
                        /* According to priority, new vchunk should belong to
                         * an existing vregion. However, this vchunk needs
                         * elements that vregion does not contain. We need
                         * to split the existing vregion into two and create
                         * a new vregion for the new vchunk in between.
                         * This is not supported now.
                         */
                        return ERR_PTR(-EOPNOTSUPP);
                }
                vregion->ref_count++;
                return vregion;
        }

        mlxsw_sp_acl_tcam_vgroup_use_patterns(vgroup, elusage,
                                              &vregion_elusage);

        return mlxsw_sp_acl_tcam_vregion_create(mlxsw_sp, vgroup, priority,
                                                &vregion_elusage);
}

static void
mlxsw_sp_acl_tcam_vregion_put(struct mlxsw_sp *mlxsw_sp,
                              struct mlxsw_sp_acl_tcam_vregion *vregion)
{
        if (--vregion->ref_count)
                return;
        mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion);
}

static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_acl_tcam_vchunk *vchunk,
                               struct mlxsw_sp_acl_tcam_region *region)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        struct mlxsw_sp_acl_tcam_chunk *chunk;

        chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
        if (!chunk)
                return ERR_PTR(-ENOMEM);
        chunk->vchunk = vchunk;
        chunk->region = region;

        ops->chunk_init(region->priv, chunk->priv, vchunk->priority);
        return chunk;
}

static void
mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_tcam_chunk *chunk)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

        ops->chunk_fini(chunk->priv);
        kfree(chunk);
}

static struct mlxsw_sp_acl_tcam_vchunk *
mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_tcam_vgroup *vgroup,
                                unsigned int priority,
                                struct mlxsw_afk_element_usage *elusage)
{
        struct mlxsw_sp_acl_tcam_vchunk *vchunk, *vchunk2;
        struct mlxsw_sp_acl_tcam_vregion *vregion;
        struct list_head *pos;
        int err;

        if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
                return ERR_PTR(-EINVAL);

        vchunk = kzalloc(sizeof(*vchunk), GFP_KERNEL);
        if (!vchunk)
                return ERR_PTR(-ENOMEM);
        INIT_LIST_HEAD(&vchunk->ventry_list);
        vchunk->priority = priority;
        vchunk->vgroup = vgroup;
        vchunk->ref_count = 1;

        vregion = mlxsw_sp_acl_tcam_vregion_get(mlxsw_sp, vgroup,
                                                priority, elusage);
        if (IS_ERR(vregion)) {
                err = PTR_ERR(vregion);
                goto err_vregion_get;
        }

        vchunk->vregion = vregion;

        err = rhashtable_insert_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
                                     mlxsw_sp_acl_tcam_vchunk_ht_params);
        if (err)
                goto err_rhashtable_insert;

        mutex_lock(&vregion->lock);
        vchunk->chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk,
                                                       vchunk->vregion->region);
        if (IS_ERR(vchunk->chunk)) {
                mutex_unlock(&vregion->lock);
                err = PTR_ERR(vchunk->chunk);
                goto err_chunk_create;
        }

        mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);

        /* Position the vchunk inside the list according to priority */
        list_for_each(pos, &vregion->vchunk_list) {
                vchunk2 = list_entry(pos, typeof(*vchunk2), list);
                if (vchunk2->priority > priority)
                        break;
        }
        list_add_tail(&vchunk->list, pos);
        mutex_unlock(&vregion->lock);
        mlxsw_sp_acl_tcam_vgroup_prio_update(vgroup);

        return vchunk;

err_chunk_create:
        rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
                               mlxsw_sp_acl_tcam_vchunk_ht_params);
err_rhashtable_insert:
        mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vregion);
err_vregion_get:
        kfree(vchunk);
        return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_vchunk_destroy(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
        struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
        struct mlxsw_sp_acl_tcam_vgroup *vgroup = vchunk->vgroup;

        mutex_lock(&vregion->lock);
        mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
        list_del(&vchunk->list);
        if (vchunk->chunk2)
                mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
        mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk);
        mutex_unlock(&vregion->lock);
        rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
                               mlxsw_sp_acl_tcam_vchunk_ht_params);
        mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vchunk->vregion);
        kfree(vchunk);
        mlxsw_sp_acl_tcam_vgroup_prio_update(vgroup);
}

static struct mlxsw_sp_acl_tcam_vchunk *
mlxsw_sp_acl_tcam_vchunk_get(struct mlxsw_sp *mlxsw_sp,
                             struct mlxsw_sp_acl_tcam_vgroup *vgroup,
                             unsigned int priority,
                             struct mlxsw_afk_element_usage *elusage)
{
        struct mlxsw_sp_acl_tcam_vchunk *vchunk;

        vchunk = rhashtable_lookup_fast(&vgroup->vchunk_ht, &priority,
                                        mlxsw_sp_acl_tcam_vchunk_ht_params);
        if (vchunk) {
                if (WARN_ON(!mlxsw_afk_key_info_subset(vchunk->vregion->key_info,
                                                       elusage)))
                        return ERR_PTR(-EINVAL);
                vchunk->ref_count++;
                return vchunk;
        }
        return mlxsw_sp_acl_tcam_vchunk_create(mlxsw_sp, vgroup,
                                               priority, elusage);
}

static void
mlxsw_sp_acl_tcam_vchunk_put(struct mlxsw_sp *mlxsw_sp,
                             struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
        if (--vchunk->ref_count)
                return;
        mlxsw_sp_acl_tcam_vchunk_destroy(mlxsw_sp, vchunk);
}

static struct mlxsw_sp_acl_tcam_entry *
mlxsw_sp_acl_tcam_entry_create(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_acl_tcam_ventry *ventry,
                               struct mlxsw_sp_acl_tcam_chunk *chunk)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        struct mlxsw_sp_acl_tcam_entry *entry;
        int err;

        entry = kzalloc(sizeof(*entry) + ops->entry_priv_size, GFP_KERNEL);
        if (!entry)
                return ERR_PTR(-ENOMEM);
        entry->ventry = ventry;
        entry->chunk = chunk;

        err = ops->entry_add(mlxsw_sp, chunk->region->priv, chunk->priv,
                             entry->priv, ventry->rulei);
        if (err)
                goto err_entry_add;

        return entry;

err_entry_add:
        kfree(entry);
        return ERR_PTR(err);
}

static void mlxsw_sp_acl_tcam_entry_destroy(struct mlxsw_sp *mlxsw_sp,
                                            struct mlxsw_sp_acl_tcam_entry *entry)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

        ops->entry_del(mlxsw_sp, entry->chunk->region->priv,
                       entry->chunk->priv, entry->priv);
        kfree(entry);
}

static int
mlxsw_sp_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
                                       struct mlxsw_sp_acl_tcam_region *region,
                                       struct mlxsw_sp_acl_tcam_entry *entry,
                                       struct mlxsw_sp_acl_rule_info *rulei)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

        return ops->entry_action_replace(mlxsw_sp, region->priv,
                                         entry->priv, rulei);
}

static int
mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
                                     struct mlxsw_sp_acl_tcam_entry *entry,
                                     bool *activity)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

        return ops->entry_activity_get(mlxsw_sp, entry->chunk->region->priv,
                                       entry->priv, activity);
}

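/* Adding a rule: find or create a vchunk matching the rule's priority
 * and element usage, then create the hardware entry under the vregion
 * lock and link the ventry into the vchunk's list.
 */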
static int mlxsw_sp_acl_tcam_ventry_add(struct mlxsw_sp *mlxsw_sp,
                                        struct mlxsw_sp_acl_tcam_vgroup *vgroup,
                                        struct mlxsw_sp_acl_tcam_ventry *ventry,
                                        struct mlxsw_sp_acl_rule_info *rulei)
{
        struct mlxsw_sp_acl_tcam_vregion *vregion;
        struct mlxsw_sp_acl_tcam_vchunk *vchunk;
        int err;

        vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp, vgroup, rulei->priority,
                                              &rulei->values.elusage);
        if (IS_ERR(vchunk))
                return PTR_ERR(vchunk);

        ventry->vchunk = vchunk;
        ventry->rulei = rulei;
        vregion = vchunk->vregion;

        mutex_lock(&vregion->lock);
        ventry->entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry,
                                                       vchunk->chunk);
        if (IS_ERR(ventry->entry)) {
                mutex_unlock(&vregion->lock);
                err = PTR_ERR(ventry->entry);
                goto err_entry_create;
        }

        list_add_tail(&ventry->list, &vchunk->ventry_list);
        mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
        mutex_unlock(&vregion->lock);

        return 0;

err_entry_create:
        mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
        return err;
}

static void mlxsw_sp_acl_tcam_ventry_del(struct mlxsw_sp *mlxsw_sp,
                                         struct mlxsw_sp_acl_tcam_ventry *ventry)
{
        struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;
        struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;

        mutex_lock(&vregion->lock);
        mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
        list_del(&ventry->list);
        mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
        mutex_unlock(&vregion->lock);
        mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
}

static int
mlxsw_sp_acl_tcam_ventry_action_replace(struct mlxsw_sp *mlxsw_sp,
                                        struct mlxsw_sp_acl_tcam_ventry *ventry,
                                        struct mlxsw_sp_acl_rule_info *rulei)
{
        struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;

        return mlxsw_sp_acl_tcam_entry_action_replace(mlxsw_sp,
                                                      vchunk->vregion->region,
                                                      ventry->entry, rulei);
}

static int
mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
                                      struct mlxsw_sp_acl_tcam_ventry *ventry,
                                      bool *activity)
{
        return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp,
                                                    ventry->entry, activity);
}

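/* Migration of a single entry is make-before-break: the entry is first
 * created in the destination chunk and only then destroyed in the
 * original one, so the rule stays installed throughout. Each migrated
 * entry consumes one credit.
 */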
static int
mlxsw_sp_acl_tcam_ventry_migrate(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_tcam_ventry *ventry,
                                 struct mlxsw_sp_acl_tcam_chunk *chunk,
                                 int *credits)
{
        struct mlxsw_sp_acl_tcam_entry *new_entry;

        /* First check if the entry is not already where we want it to be. */
        if (ventry->entry->chunk == chunk)
                return 0;

        if (--(*credits) < 0)
                return 0;

        new_entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry, chunk);
        if (IS_ERR(new_entry))
                return PTR_ERR(new_entry);
        mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
        ventry->entry = new_entry;
        return 0;
}

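/* While a vchunk is being migrated, vchunk->chunk points to the new
 * chunk in the destination region and vchunk->chunk2 keeps the original
 * one. start_ventry and stop_ventry in the rehash context record where
 * an interrupted pass has to resume and where a rollback has to stop.
 */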
1281static int
1282mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
1283                                       struct mlxsw_sp_acl_tcam_vchunk *vchunk,
1284                                       struct mlxsw_sp_acl_tcam_region *region,
1285                                       struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
1286{
1287        struct mlxsw_sp_acl_tcam_chunk *new_chunk;
1288
1289        new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
1290        if (IS_ERR(new_chunk))
1291                return PTR_ERR(new_chunk);
1292        vchunk->chunk2 = vchunk->chunk;
1293        vchunk->chunk = new_chunk;
1294        ctx->current_vchunk = vchunk;
1295        ctx->start_ventry = NULL;
1296        ctx->stop_ventry = NULL;
1297        return 0;
1298}
1299
1300static void
1301mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
1302                                     struct mlxsw_sp_acl_tcam_vchunk *vchunk,
1303                                     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
1304{
1305        mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
1306        vchunk->chunk2 = NULL;
1307        ctx->current_vchunk = NULL;
1308}
1309
1310static int
1311mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
1312                                     struct mlxsw_sp_acl_tcam_vchunk *vchunk,
1313                                     struct mlxsw_sp_acl_tcam_region *region,
1314                                     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
1315                                     int *credits)
1316{
1317        struct mlxsw_sp_acl_tcam_ventry *ventry;
1318        int err;
1319
1320        if (vchunk->chunk->region != region) {
1321                err = mlxsw_sp_acl_tcam_vchunk_migrate_start(mlxsw_sp, vchunk,
1322                                                             region, ctx);
1323                if (err)
1324                        return err;
1325        } else if (!vchunk->chunk2) {
1326                /* The chunk is already as it should be, nothing to do. */
1327                return 0;
1328        }
1329
1330        /* If the migration got interrupted, we have the ventry to start from
1331         * stored in context.
1332         */
1333        if (ctx->start_ventry)
1334                ventry = ctx->start_ventry;
1335        else
1336                ventry = list_first_entry(&vchunk->ventry_list,
1337                                          typeof(*ventry), list);
1338
1339        list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
1340                /* During rollback, once we reach the ventry that failed
1341                 * to migrate, we are done.
1342                 */
1343                if (ventry == ctx->stop_ventry)
1344                        break;
1345
1346                err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
1347                                                       vchunk->chunk, credits);
1348                if (err) {
1349                        if (ctx->this_is_rollback) {
1350                                /* Save the ventry which we ended with and try
1351                                 * to continue later on.
1352                                 */
1353                                ctx->start_ventry = ventry;
1354                                return err;
1355                        }
1356                        /* Swap the chunk and chunk2 pointers so the follow-up
1357                         * rollback call will see the original chunk pointer
1358                         * in vchunk->chunk.
1359                         */
1360                        swap(vchunk->chunk, vchunk->chunk2);
1361                        /* The rollback has to be done from beginning of the
1362                         * chunk, that is why we have to null the start_ventry.
1363                         * However, we know where to stop the rollback,
1364                         * at the current ventry.
1365                         */
                        ctx->start_ventry = NULL;
                        ctx->stop_ventry = ventry;
                        return err;
                } else if (*credits < 0) {
                        /* We are out of credits, the rest of the ventries
                         * will be migrated later. Save the ventry
                         * which we ended with.
                         */
                        ctx->start_ventry = ventry;
                        return 0;
                }
        }

        mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
        return 0;
}

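/* Walk all vchunks of the vregion and migrate each one into
 * vregion->region. If a previous pass was interrupted, resume from
 * ctx->current_vchunk instead of the list head. Returns as soon as the
 * credit budget is exhausted; the caller is expected to invoke this
 * again later to finish the job.
 */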
static int
mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
                                     struct mlxsw_sp_acl_tcam_vregion *vregion,
                                     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
                                     int *credits)
{
        struct mlxsw_sp_acl_tcam_vchunk *vchunk;
        int err;

        /* If the migration got interrupted, we have the vchunk
         * we are working on stored in context.
         */
        if (ctx->current_vchunk)
                vchunk = ctx->current_vchunk;
        else
                vchunk = list_first_entry(&vregion->vchunk_list,
                                          typeof(*vchunk), list);

        list_for_each_entry_from(vchunk, &vregion->vchunk_list, list) {
                err = mlxsw_sp_acl_tcam_vchunk_migrate_one(mlxsw_sp, vchunk,
                                                           vregion->region,
                                                           ctx, credits);
                if (err || *credits < 0)
                        return err;
        }
        return 0;
}

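/* Top-level migration step, performed under vregion->lock. If the
 * migration fails, the region pointers are swapped back and a second
 * migrate_all pass rolls the already-migrated entries back into the
 * original region. A failed rollback is only logged here; it is
 * continued by a later invocation.
 */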
static int
mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
                                  struct mlxsw_sp_acl_tcam_vregion *vregion,
                                  struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
                                  int *credits)
{
        int err, err2;

        trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
        mutex_lock(&vregion->lock);
        err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
                                                   ctx, credits);
        if (err) {
                /* The migration was not successful, so swap back so that
                 * the original region pointer is assigned to
                 * vregion->region again.
                 */
                swap(vregion->region, vregion->region2);
                ctx->current_vchunk = NULL;
                ctx->this_is_rollback = true;
                err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
                                                            ctx, credits);
                if (err2) {
                        trace_mlxsw_sp_acl_tcam_vregion_rehash_rollback_failed(mlxsw_sp,
                                                                               vregion);
                        dev_err(mlxsw_sp->bus_info->dev, "Failed to roll back after vregion migration failure\n");
                        /* Let the rollback be continued later on. */
                }
        }
        mutex_unlock(&vregion->lock);
        trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
        return err;
}

static bool
mlxsw_sp_acl_tcam_vregion_rehash_in_progress(const struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
        return ctx->hints_priv;
}

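/* Start a rehash cycle: get placement hints from the low-level ops,
 * create a new region based on them and attach it to the group next to
 * the old one. From here on, vregion->region points at the region being
 * migrated to, while vregion->region2 keeps the original. A valid
 * ctx->hints_priv is what marks a rehash as in progress (see the helper
 * above).
 */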
static int
mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
                                       struct mlxsw_sp_acl_tcam_vregion *vregion,
                                       struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        unsigned int priority = mlxsw_sp_acl_tcam_vregion_prio(vregion);
        struct mlxsw_sp_acl_tcam_region *new_region;
        void *hints_priv;
        int err;

        trace_mlxsw_sp_acl_tcam_vregion_rehash(mlxsw_sp, vregion);

        hints_priv = ops->region_rehash_hints_get(vregion->region->priv);
        if (IS_ERR(hints_priv))
                return PTR_ERR(hints_priv);

        new_region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, vregion->tcam,
                                                     vregion, hints_priv);
        if (IS_ERR(new_region)) {
                err = PTR_ERR(new_region);
                goto err_region_create;
        }

        /* vregion->region contains the pointer to the new region
         * we are going to migrate to.
         */
        vregion->region2 = vregion->region;
        vregion->region = new_region;
        err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp,
                                                    vregion->region2->group,
                                                    new_region, priority,
                                                    vregion->region2);
        if (err)
                goto err_group_region_attach;

        ctx->hints_priv = hints_priv;
        ctx->this_is_rollback = false;

        return 0;

err_group_region_attach:
        vregion->region = vregion->region2;
        vregion->region2 = NULL;
        mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, new_region);
err_region_create:
        ops->region_rehash_hints_put(hints_priv);
        return err;
}

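/* Finish a rehash cycle: detach and destroy the now-unused original
 * region and release the hints. Clearing ctx->hints_priv marks the
 * rehash as no longer in progress.
 */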
static void
mlxsw_sp_acl_tcam_vregion_rehash_end(struct mlxsw_sp *mlxsw_sp,
                                     struct mlxsw_sp_acl_tcam_vregion *vregion,
                                     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
        struct mlxsw_sp_acl_tcam_region *unused_region = vregion->region2;
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

        vregion->region2 = NULL;
        mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, unused_region);
        mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, unused_region);
        ops->region_rehash_hints_put(ctx->hints_priv);
        ctx->hints_priv = NULL;
}

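/* One step of the rehash state machine, invoked with a credit budget
 * (presumably by the periodic rehash work): start a new cycle if none
 * is in progress, migrate as many entries as the credits allow, and
 * end the cycle only if the budget was not exhausted, i.e. the
 * migration ran to completion or was fully rolled back.
 */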
static void
mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_tcam_vregion *vregion,
                                 int *credits)
{
        struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
        int err;

        /* Check if the previous rehash work was interrupted, in which case
         * we have to continue it now. If not, start a new rehash.
         */
        if (!mlxsw_sp_acl_tcam_vregion_rehash_in_progress(ctx)) {
                err = mlxsw_sp_acl_tcam_vregion_rehash_start(mlxsw_sp,
                                                             vregion, ctx);
                if (err) {
                        if (err != -EAGAIN)
                                dev_err(mlxsw_sp->bus_info->dev, "Failed to get rehash hints\n");
                        return;
                }
        }

        err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
                                                ctx, credits);
        if (err)
                dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");

        if (*credits >= 0)
                mlxsw_sp_acl_tcam_vregion_rehash_end(mlxsw_sp, vregion, ctx);
}

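/* Key element patterns from which region keys are derived. Code earlier
 * in this file picks the first pattern whose element set covers the
 * elements a ruleset actually uses, so the two arrays below effectively
 * define the supported IPv4 and IPv6 region types.
 */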
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
        MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
        MLXSW_AFK_ELEMENT_DMAC_32_47,
        MLXSW_AFK_ELEMENT_DMAC_0_31,
        MLXSW_AFK_ELEMENT_SMAC_32_47,
        MLXSW_AFK_ELEMENT_SMAC_0_31,
        MLXSW_AFK_ELEMENT_ETHERTYPE,
        MLXSW_AFK_ELEMENT_IP_PROTO,
        MLXSW_AFK_ELEMENT_SRC_IP_0_31,
        MLXSW_AFK_ELEMENT_DST_IP_0_31,
        MLXSW_AFK_ELEMENT_DST_L4_PORT,
        MLXSW_AFK_ELEMENT_SRC_L4_PORT,
        MLXSW_AFK_ELEMENT_VID,
        MLXSW_AFK_ELEMENT_PCP,
        MLXSW_AFK_ELEMENT_TCP_FLAGS,
        MLXSW_AFK_ELEMENT_IP_TTL_,
        MLXSW_AFK_ELEMENT_IP_ECN,
        MLXSW_AFK_ELEMENT_IP_DSCP,
};

static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
        MLXSW_AFK_ELEMENT_ETHERTYPE,
        MLXSW_AFK_ELEMENT_IP_PROTO,
        MLXSW_AFK_ELEMENT_SRC_IP_96_127,
        MLXSW_AFK_ELEMENT_SRC_IP_64_95,
        MLXSW_AFK_ELEMENT_SRC_IP_32_63,
        MLXSW_AFK_ELEMENT_SRC_IP_0_31,
        MLXSW_AFK_ELEMENT_DST_IP_96_127,
        MLXSW_AFK_ELEMENT_DST_IP_64_95,
        MLXSW_AFK_ELEMENT_DST_IP_32_63,
        MLXSW_AFK_ELEMENT_DST_IP_0_31,
        MLXSW_AFK_ELEMENT_DST_L4_PORT,
        MLXSW_AFK_ELEMENT_SRC_L4_PORT,
};

static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
        {
                .elements = mlxsw_sp_acl_tcam_pattern_ipv4,
                .elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4),
        },
        {
                .elements = mlxsw_sp_acl_tcam_pattern_ipv6,
                .elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6),
        },
};

#define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \
        ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)

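/* Flower (TC) profile: thin glue binding the generic vgroup/ventry code
 * above to the mlxsw_sp_acl_profile_ops interface. The "true" passed to
 * mlxsw_sp_acl_tcam_vgroup_add below is, going by the vgroup code
 * earlier in the file, the vregion rehash enable; the MR profile
 * further down passes false there.
 */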
struct mlxsw_sp_acl_tcam_flower_ruleset {
        struct mlxsw_sp_acl_tcam_vgroup vgroup;
};

struct mlxsw_sp_acl_tcam_flower_rule {
        struct mlxsw_sp_acl_tcam_ventry ventry;
};

static int
mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
                                     struct mlxsw_sp_acl_tcam *tcam,
                                     void *ruleset_priv,
                                     struct mlxsw_afk_element_usage *tmplt_elusage,
                                     unsigned int *p_min_prio,
                                     unsigned int *p_max_prio)
{
        struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

        return mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
                                            mlxsw_sp_acl_tcam_patterns,
                                            MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
                                            tmplt_elusage, true,
                                            p_min_prio, p_max_prio);
}

static void
mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
                                     void *ruleset_priv)
{
        struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

        mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
}

static int
mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
                                      void *ruleset_priv,
                                      struct mlxsw_sp_port *mlxsw_sp_port,
                                      bool ingress)
{
        struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

        return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->vgroup.group,
                                            mlxsw_sp_port, ingress);
}

static void
mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
                                        void *ruleset_priv,
                                        struct mlxsw_sp_port *mlxsw_sp_port,
                                        bool ingress)
{
        struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

        mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->vgroup.group,
                                       mlxsw_sp_port, ingress);
}

static u16
mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
{
        struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

        return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
}

static int
mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
                                  void *ruleset_priv, void *rule_priv,
                                  struct mlxsw_sp_acl_rule_info *rulei)
{
        struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
        struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

        return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
                                            &rule->ventry, rulei);
}

static void
mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
{
        struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

        mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
}

static int
mlxsw_sp_acl_tcam_flower_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
                                             void *rule_priv,
                                             struct mlxsw_sp_acl_rule_info *rulei)
{
        return -EOPNOTSUPP;
}

static int
mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
                                           void *rule_priv, bool *activity)
{
        struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

        return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry,
                                                     activity);
}

static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
        .ruleset_priv_size      = sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
        .ruleset_add            = mlxsw_sp_acl_tcam_flower_ruleset_add,
        .ruleset_del            = mlxsw_sp_acl_tcam_flower_ruleset_del,
        .ruleset_bind           = mlxsw_sp_acl_tcam_flower_ruleset_bind,
        .ruleset_unbind         = mlxsw_sp_acl_tcam_flower_ruleset_unbind,
        .ruleset_group_id       = mlxsw_sp_acl_tcam_flower_ruleset_group_id,
        .rule_priv_size         = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
        .rule_add               = mlxsw_sp_acl_tcam_flower_rule_add,
        .rule_del               = mlxsw_sp_acl_tcam_flower_rule_del,
        .rule_action_replace    = mlxsw_sp_acl_tcam_flower_rule_action_replace,
        .rule_activity_get      = mlxsw_sp_acl_tcam_flower_rule_activity_get,
};

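/* Multicast router (MR) profile. Unlike flower, a vchunk is taken up
 * front at ruleset creation (see the comment in the ruleset_add
 * callback below) and the bind/unbind callbacks are no-ops, since
 * binding happens when the multicast router itself is initialized.
 */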
struct mlxsw_sp_acl_tcam_mr_ruleset {
        struct mlxsw_sp_acl_tcam_vchunk *vchunk;
        struct mlxsw_sp_acl_tcam_vgroup vgroup;
};

struct mlxsw_sp_acl_tcam_mr_rule {
        struct mlxsw_sp_acl_tcam_ventry ventry;
};

static int
mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_tcam *tcam,
                                 void *ruleset_priv,
                                 struct mlxsw_afk_element_usage *tmplt_elusage,
                                 unsigned int *p_min_prio,
                                 unsigned int *p_max_prio)
{
        struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
        int err;

        err = mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
                                           mlxsw_sp_acl_tcam_patterns,
                                           MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
                                           tmplt_elusage, false,
                                           p_min_prio, p_max_prio);
        if (err)
                return err;

        /* For most TCAM clients it would make sense to take a TCAM chunk
         * only when the first rule is written. This is not the case for
         * the multicast router, which must be bound to a specific ACL
         * group ID, and that group ID must exist in HW before the
         * multicast router is initialized.
         */
        ruleset->vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp,
                                                       &ruleset->vgroup, 1,
                                                       tmplt_elusage);
        if (IS_ERR(ruleset->vchunk)) {
                err = PTR_ERR(ruleset->vchunk);
                goto err_chunk_get;
        }

        return 0;

err_chunk_get:
        mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
        return err;
}

static void
mlxsw_sp_acl_tcam_mr_ruleset_del(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv)
{
        struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;

        mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, ruleset->vchunk);
        mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
}

static int
mlxsw_sp_acl_tcam_mr_ruleset_bind(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
                                  struct mlxsw_sp_port *mlxsw_sp_port,
                                  bool ingress)
{
        /* Binding is done when initializing multicast router */
        return 0;
}

static void
mlxsw_sp_acl_tcam_mr_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
                                    void *ruleset_priv,
                                    struct mlxsw_sp_port *mlxsw_sp_port,
                                    bool ingress)
{
}

static u16
mlxsw_sp_acl_tcam_mr_ruleset_group_id(void *ruleset_priv)
{
        struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;

        return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
}

static int
mlxsw_sp_acl_tcam_mr_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
                              void *rule_priv,
                              struct mlxsw_sp_acl_rule_info *rulei)
{
        struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
        struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

        return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
                                            &rule->ventry, rulei);
}

static void
mlxsw_sp_acl_tcam_mr_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
{
        struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

        mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
}

static int
mlxsw_sp_acl_tcam_mr_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
                                         void *rule_priv,
                                         struct mlxsw_sp_acl_rule_info *rulei)
{
        struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

        return mlxsw_sp_acl_tcam_ventry_action_replace(mlxsw_sp, &rule->ventry,
                                                       rulei);
}

static int
mlxsw_sp_acl_tcam_mr_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
                                       void *rule_priv, bool *activity)
{
        struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

        return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry,
                                                     activity);
}

static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_mr_ops = {
        .ruleset_priv_size      = sizeof(struct mlxsw_sp_acl_tcam_mr_ruleset),
        .ruleset_add            = mlxsw_sp_acl_tcam_mr_ruleset_add,
        .ruleset_del            = mlxsw_sp_acl_tcam_mr_ruleset_del,
        .ruleset_bind           = mlxsw_sp_acl_tcam_mr_ruleset_bind,
        .ruleset_unbind         = mlxsw_sp_acl_tcam_mr_ruleset_unbind,
        .ruleset_group_id       = mlxsw_sp_acl_tcam_mr_ruleset_group_id,
        .rule_priv_size         = sizeof(struct mlxsw_sp_acl_tcam_mr_rule),
        .rule_add               = mlxsw_sp_acl_tcam_mr_rule_add,
        .rule_del               = mlxsw_sp_acl_tcam_mr_rule_del,
        .rule_action_replace    = mlxsw_sp_acl_tcam_mr_rule_action_replace,
        .rule_activity_get      = mlxsw_sp_acl_tcam_mr_rule_activity_get,
};

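/* Profile dispatch table, indexed by enum mlxsw_sp_acl_profile, and its
 * lookup helper. A caller elsewhere in the driver (the exact call site
 * is assumed here, not shown) would drive a ruleset through the ops
 * roughly like this:
 *
 *      const struct mlxsw_sp_acl_profile_ops *ops;
 *
 *      ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp,
 *                                          MLXSW_SP_ACL_PROFILE_FLOWER);
 *      if (!ops)
 *              return -EINVAL;
 *      err = ops->ruleset_add(mlxsw_sp, tcam, ruleset_priv, tmplt_elusage,
 *                             &min_prio, &max_prio);
 *      if (err)
 *              return err;
 *      err = ops->rule_add(mlxsw_sp, ruleset_priv, rule_priv, rulei);
 */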
static const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops_arr[] = {
        [MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
        [MLXSW_SP_ACL_PROFILE_MR] = &mlxsw_sp_acl_tcam_mr_ops,
};

const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
                              enum mlxsw_sp_acl_profile profile)
{
        const struct mlxsw_sp_acl_profile_ops *ops;

        if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr)))
                return NULL;
        ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile];
        if (WARN_ON(!ops))
                return NULL;
        return ops;
}
