/* linux/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c */
   1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
   2/* Copyright (c) 2019 Mellanox Technologies. */
   3
   4#include "dr_types.h"
   5
/* Max STEs a single rule chain may use: match STEs plus action STEs */
#define DR_RULE_MAX_STE_CHAIN (DR_RULE_MAX_STES + DR_ACTION_MAX_STES)
   7
/* Links a rule to one action it uses; members live on
 * rule->rule_actions_list so the action refcounts taken at rule
 * creation can be dropped when the rule is destroyed.
 */
struct mlx5dr_rule_action_member {
	struct mlx5dr_action *action;
	struct list_head list;
};
  12
  13static int dr_rule_append_to_miss_list(struct mlx5dr_ste *new_last_ste,
  14                                       struct list_head *miss_list,
  15                                       struct list_head *send_list)
  16{
  17        struct mlx5dr_ste_send_info *ste_info_last;
  18        struct mlx5dr_ste *last_ste;
  19
  20        /* The new entry will be inserted after the last */
  21        last_ste = list_last_entry(miss_list, struct mlx5dr_ste, miss_list_node);
  22        WARN_ON(!last_ste);
  23
  24        ste_info_last = kzalloc(sizeof(*ste_info_last), GFP_KERNEL);
  25        if (!ste_info_last)
  26                return -ENOMEM;
  27
  28        mlx5dr_ste_set_miss_addr(last_ste->hw_ste,
  29                                 mlx5dr_ste_get_icm_addr(new_last_ste));
  30        list_add_tail(&new_last_ste->miss_list_node, miss_list);
  31
  32        mlx5dr_send_fill_and_append_ste_send_info(last_ste, DR_STE_SIZE_REDUCED,
  33                                                  0, last_ste->hw_ste,
  34                                                  ste_info_last, send_list, true);
  35
  36        return 0;
  37}
  38
  39static struct mlx5dr_ste *
  40dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
  41                              struct mlx5dr_matcher_rx_tx *nic_matcher,
  42                              u8 *hw_ste)
  43{
  44        struct mlx5dr_domain *dmn = matcher->tbl->dmn;
  45        struct mlx5dr_ste_htbl *new_htbl;
  46        struct mlx5dr_ste *ste;
  47
  48        /* Create new table for miss entry */
  49        new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
  50                                         DR_CHUNK_SIZE_1,
  51                                         MLX5DR_STE_LU_TYPE_DONT_CARE,
  52                                         0);
  53        if (!new_htbl) {
  54                mlx5dr_dbg(dmn, "Failed allocating collision table\n");
  55                return NULL;
  56        }
  57
  58        /* One and only entry, never grows */
  59        ste = new_htbl->ste_arr;
  60        mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
  61        mlx5dr_htbl_get(new_htbl);
  62
  63        return ste;
  64}
  65
  66static struct mlx5dr_ste *
  67dr_rule_create_collision_entry(struct mlx5dr_matcher *matcher,
  68                               struct mlx5dr_matcher_rx_tx *nic_matcher,
  69                               u8 *hw_ste,
  70                               struct mlx5dr_ste *orig_ste)
  71{
  72        struct mlx5dr_ste *ste;
  73
  74        ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
  75        if (!ste) {
  76                mlx5dr_dbg(matcher->tbl->dmn, "Failed creating collision entry\n");
  77                return NULL;
  78        }
  79
  80        ste->ste_chain_location = orig_ste->ste_chain_location;
  81
  82        /* In collision entry, all members share the same miss_list_head */
  83        ste->htbl->miss_list = mlx5dr_ste_get_miss_list(orig_ste);
  84
  85        /* Next table */
  86        if (mlx5dr_ste_create_next_htbl(matcher, nic_matcher, ste, hw_ste,
  87                                        DR_CHUNK_SIZE_1)) {
  88                mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
  89                goto free_tbl;
  90        }
  91
  92        return ste;
  93
  94free_tbl:
  95        mlx5dr_ste_free(ste, matcher, nic_matcher);
  96        return NULL;
  97}
  98
  99static int
 100dr_rule_handle_one_ste_in_update_list(struct mlx5dr_ste_send_info *ste_info,
 101                                      struct mlx5dr_domain *dmn)
 102{
 103        int ret;
 104
 105        list_del(&ste_info->send_list);
 106        ret = mlx5dr_send_postsend_ste(dmn, ste_info->ste, ste_info->data,
 107                                       ste_info->size, ste_info->offset);
 108        if (ret)
 109                goto out;
 110        /* Copy data to ste, only reduced size, the last 16B (mask)
 111         * is already written to the hw.
 112         */
 113        memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_REDUCED);
 114
 115out:
 116        kfree(ste_info);
 117        return ret;
 118}
 119
 120static int dr_rule_send_update_list(struct list_head *send_ste_list,
 121                                    struct mlx5dr_domain *dmn,
 122                                    bool is_reverse)
 123{
 124        struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
 125        int ret;
 126
 127        if (is_reverse) {
 128                list_for_each_entry_safe_reverse(ste_info, tmp_ste_info,
 129                                                 send_ste_list, send_list) {
 130                        ret = dr_rule_handle_one_ste_in_update_list(ste_info,
 131                                                                    dmn);
 132                        if (ret)
 133                                return ret;
 134                }
 135        } else {
 136                list_for_each_entry_safe(ste_info, tmp_ste_info,
 137                                         send_ste_list, send_list) {
 138                        ret = dr_rule_handle_one_ste_in_update_list(ste_info,
 139                                                                    dmn);
 140                        if (ret)
 141                                return ret;
 142                }
 143        }
 144
 145        return 0;
 146}
 147
 148static struct mlx5dr_ste *
 149dr_rule_find_ste_in_miss_list(struct list_head *miss_list, u8 *hw_ste)
 150{
 151        struct mlx5dr_ste *ste;
 152
 153        if (list_empty(miss_list))
 154                return NULL;
 155
 156        /* Check if hw_ste is present in the list */
 157        list_for_each_entry(ste, miss_list, miss_list_node) {
 158                if (mlx5dr_ste_equal_tag(ste->hw_ste, hw_ste))
 159                        return ste;
 160        }
 161
 162        return NULL;
 163}
 164
 165static struct mlx5dr_ste *
 166dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
 167                                struct mlx5dr_matcher_rx_tx *nic_matcher,
 168                                struct list_head *update_list,
 169                                struct mlx5dr_ste *col_ste,
 170                                u8 *hw_ste)
 171{
 172        struct mlx5dr_ste *new_ste;
 173        int ret;
 174
 175        new_ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
 176        if (!new_ste)
 177                return NULL;
 178
 179        /* In collision entry, all members share the same miss_list_head */
 180        new_ste->htbl->miss_list = mlx5dr_ste_get_miss_list(col_ste);
 181
 182        /* Update the previous from the list */
 183        ret = dr_rule_append_to_miss_list(new_ste,
 184                                          mlx5dr_ste_get_miss_list(col_ste),
 185                                          update_list);
 186        if (ret) {
 187                mlx5dr_dbg(matcher->tbl->dmn, "Failed update dup entry\n");
 188                goto err_exit;
 189        }
 190
 191        return new_ste;
 192
 193err_exit:
 194        mlx5dr_ste_free(new_ste, matcher, nic_matcher);
 195        return NULL;
 196}
 197
/* Transfer control metadata from @cur_ste (old table) to @new_ste
 * (rehashed table): next-table linkage, chain location, refcount and
 * the rule-member back-references that point at this STE.
 */
static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher,
					 struct mlx5dr_matcher_rx_tx *nic_matcher,
					 struct mlx5dr_ste *cur_ste,
					 struct mlx5dr_ste *new_ste)
{
	new_ste->next_htbl = cur_ste->next_htbl;
	new_ste->ste_chain_location = cur_ste->ste_chain_location;

	/* Re-point the next table back at the copied STE; the last STE in
	 * a rule has no next table to fix up.
	 */
	if (!mlx5dr_ste_is_last_in_rule(nic_matcher, new_ste->ste_chain_location))
		new_ste->next_htbl->pointing_ste = new_ste;

	/* We need to copy the refcount since this ste
	 * may have been traversed several times
	 */
	new_ste->refcount = cur_ste->refcount;

	/* Link old STEs rule_mem list to the new ste */
	mlx5dr_rule_update_rule_member(cur_ste, new_ste);
	INIT_LIST_HEAD(&new_ste->rule_list);
	list_splice_tail_init(&cur_ste->rule_list, &new_ste->rule_list);
}
 219
/* Copy one STE from the current hash table into @new_htbl during a
 * rehash: rebuild its HW STE (mask + control + tag), re-hash it into the
 * new table, and handle the case where the new slot is already taken by
 * creating a collision entry whose HW write is queued on @update_list.
 *
 * Return: the STE's location in the new table, or NULL on failure.
 */
static struct mlx5dr_ste *
dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
			struct mlx5dr_matcher_rx_tx *nic_matcher,
			struct mlx5dr_ste *cur_ste,
			struct mlx5dr_ste_htbl *new_htbl,
			struct list_head *update_list)
{
	struct mlx5dr_ste_send_info *ste_info;
	bool use_update_list = false;
	u8 hw_ste[DR_STE_SIZE] = {};
	struct mlx5dr_ste *new_ste;
	int new_idx;
	u8 sb_idx;

	/* Copy STE mask from the matcher */
	sb_idx = cur_ste->ste_chain_location - 1;
	mlx5dr_ste_set_bit_mask(hw_ste, nic_matcher->ste_builder[sb_idx].bit_mask);

	/* Copy STE control and tag */
	memcpy(hw_ste, cur_ste->hw_ste, DR_STE_SIZE_REDUCED);
	mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);

	/* The hash index may differ from the old table's (table size changed) */
	new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
	new_ste = &new_htbl->ste_arr[new_idx];

	if (mlx5dr_ste_is_not_used(new_ste)) {
		/* Free slot - this STE heads a fresh miss list */
		mlx5dr_htbl_get(new_htbl);
		list_add_tail(&new_ste->miss_list_node,
			      mlx5dr_ste_get_miss_list(new_ste));
	} else {
		/* Slot occupied - append a collision entry to the existing
		 * miss list; its HW write must go through update_list.
		 */
		new_ste = dr_rule_rehash_handle_collision(matcher,
							  nic_matcher,
							  update_list,
							  new_ste,
							  hw_ste);
		if (!new_ste) {
			mlx5dr_dbg(matcher->tbl->dmn, "Failed adding collision entry, index: %d\n",
				   new_idx);
			return NULL;
		}
		new_htbl->ctrl.num_of_collisions++;
		use_update_list = true;
	}

	/* Shadow copy of the HW STE (mask part is kept by the table) */
	memcpy(new_ste->hw_ste, hw_ste, DR_STE_SIZE_REDUCED);

	new_htbl->ctrl.num_of_valid_entries++;

	if (use_update_list) {
		ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
		if (!ste_info)
			goto err_exit;

		mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0,
							  hw_ste, ste_info,
							  update_list, true);
	}

	/* Carry over linkage, refcount and rule-member back-pointers */
	dr_rule_rehash_copy_ste_ctrl(matcher, nic_matcher, cur_ste, new_ste);

	return new_ste;

err_exit:
	mlx5dr_ste_free(new_ste, matcher, nic_matcher);
	return NULL;
}
 286
 287static int dr_rule_rehash_copy_miss_list(struct mlx5dr_matcher *matcher,
 288                                         struct mlx5dr_matcher_rx_tx *nic_matcher,
 289                                         struct list_head *cur_miss_list,
 290                                         struct mlx5dr_ste_htbl *new_htbl,
 291                                         struct list_head *update_list)
 292{
 293        struct mlx5dr_ste *tmp_ste, *cur_ste, *new_ste;
 294
 295        if (list_empty(cur_miss_list))
 296                return 0;
 297
 298        list_for_each_entry_safe(cur_ste, tmp_ste, cur_miss_list, miss_list_node) {
 299                new_ste = dr_rule_rehash_copy_ste(matcher,
 300                                                  nic_matcher,
 301                                                  cur_ste,
 302                                                  new_htbl,
 303                                                  update_list);
 304                if (!new_ste)
 305                        goto err_insert;
 306
 307                list_del(&cur_ste->miss_list_node);
 308                mlx5dr_htbl_put(cur_ste->htbl);
 309        }
 310        return 0;
 311
 312err_insert:
 313        mlx5dr_err(matcher->tbl->dmn, "Fatal error during resize\n");
 314        WARN_ON(true);
 315        return -EINVAL;
 316}
 317
 318static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
 319                                    struct mlx5dr_matcher_rx_tx *nic_matcher,
 320                                    struct mlx5dr_ste_htbl *cur_htbl,
 321                                    struct mlx5dr_ste_htbl *new_htbl,
 322                                    struct list_head *update_list)
 323{
 324        struct mlx5dr_ste *cur_ste;
 325        int cur_entries;
 326        int err = 0;
 327        int i;
 328
 329        cur_entries = mlx5dr_icm_pool_chunk_size_to_entries(cur_htbl->chunk_size);
 330
 331        if (cur_entries < 1) {
 332                mlx5dr_dbg(matcher->tbl->dmn, "Invalid number of entries\n");
 333                return -EINVAL;
 334        }
 335
 336        for (i = 0; i < cur_entries; i++) {
 337                cur_ste = &cur_htbl->ste_arr[i];
 338                if (mlx5dr_ste_is_not_used(cur_ste)) /* Empty, nothing to copy */
 339                        continue;
 340
 341                err = dr_rule_rehash_copy_miss_list(matcher,
 342                                                    nic_matcher,
 343                                                    mlx5dr_ste_get_miss_list(cur_ste),
 344                                                    new_htbl,
 345                                                    update_list);
 346                if (err)
 347                        goto clean_copy;
 348        }
 349
 350clean_copy:
 351        return err;
 352}
 353
/* Grow @cur_htbl to @new_size: allocate the new table, copy all entries
 * into it, write it to HW and re-connect the previous table (or the
 * matcher start anchor when @ste_location == 1) to point at it.
 *
 * HW writes for copied entries are flushed here via a local send list;
 * the final re-connect write is queued on @update_list for the caller.
 *
 * Return: the new hash table, or NULL on failure.
 */
static struct mlx5dr_ste_htbl *
dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
		    struct mlx5dr_rule_rx_tx *nic_rule,
		    struct mlx5dr_ste_htbl *cur_htbl,
		    u8 ste_location,
		    struct list_head *update_list,
		    enum mlx5dr_icm_chunk_size new_size)
{
	struct mlx5dr_ste_send_info *del_ste_info, *tmp_ste_info;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_ste_send_info *ste_info;
	struct mlx5dr_htbl_connect_info info;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	u8 formatted_ste[DR_STE_SIZE] = {};
	LIST_HEAD(rehash_table_send_list);
	struct mlx5dr_ste *ste_to_update;
	struct mlx5dr_ste_htbl *new_htbl;
	int err;

	nic_matcher = nic_rule->nic_matcher;
	nic_dmn = nic_matcher->nic_tbl->nic_dmn;

	/* ste_info carries the final re-connect write queued on update_list */
	ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
	if (!ste_info)
		return NULL;

	new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
					 new_size,
					 cur_htbl->lu_type,
					 cur_htbl->byte_mask);
	if (!new_htbl) {
		mlx5dr_err(dmn, "Failed to allocate new hash table\n");
		goto free_ste_info;
	}

	/* Write new table to HW */
	info.type = CONNECT_MISS;
	info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
	mlx5dr_ste_set_formatted_ste(dmn->info.caps.gvmi,
				     nic_dmn,
				     new_htbl,
				     formatted_ste,
				     &info);

	/* Re-link the chain around the new table before copying entries */
	new_htbl->pointing_ste = cur_htbl->pointing_ste;
	new_htbl->pointing_ste->next_htbl = new_htbl;
	err = dr_rule_rehash_copy_htbl(matcher,
				       nic_matcher,
				       cur_htbl,
				       new_htbl,
				       &rehash_table_send_list);
	if (err)
		goto free_new_htbl;

	if (mlx5dr_send_postsend_htbl(dmn, new_htbl, formatted_ste,
				      nic_matcher->ste_builder[ste_location - 1].bit_mask)) {
		mlx5dr_err(dmn, "Failed writing table to HW\n");
		goto free_new_htbl;
	}

	/* Writing to the hw is done in regular order of rehash_table_send_list,
	 * in order to have the origin data written before the miss address of
	 * collision entries, if exists.
	 */
	if (dr_rule_send_update_list(&rehash_table_send_list, dmn, false)) {
		mlx5dr_err(dmn, "Failed updating table to HW\n");
		goto free_ste_list;
	}

	/* Connect previous hash table to current */
	if (ste_location == 1) {
		/* The previous table is an anchor, anchors size is always one STE */
		struct mlx5dr_ste_htbl *prev_htbl = cur_htbl->pointing_ste->htbl;

		/* On matcher s_anchor we keep an extra refcount */
		mlx5dr_htbl_get(new_htbl);
		mlx5dr_htbl_put(cur_htbl);

		nic_matcher->s_htbl = new_htbl;

		/* It is safe to operate dr_ste_set_hit_addr on the hw_ste here
		 * (48B len) which works only on first 32B
		 */
		mlx5dr_ste_set_hit_addr(prev_htbl->ste_arr[0].hw_ste,
					new_htbl->chunk->icm_addr,
					new_htbl->chunk->num_of_entries);

		ste_to_update = &prev_htbl->ste_arr[0];
	} else {
		mlx5dr_ste_set_hit_addr_by_next_htbl(cur_htbl->pointing_ste->hw_ste,
						     new_htbl);
		ste_to_update = cur_htbl->pointing_ste;
	}

	/* Queue the re-connect write; caller decides when to flush it */
	mlx5dr_send_fill_and_append_ste_send_info(ste_to_update, DR_STE_SIZE_REDUCED,
						  0, ste_to_update->hw_ste, ste_info,
						  update_list, false);

	return new_htbl;

free_ste_list:
	/* Clean all ste_info's from the new table */
	list_for_each_entry_safe(del_ste_info, tmp_ste_info,
				 &rehash_table_send_list, send_list) {
		list_del(&del_ste_info->send_list);
		kfree(del_ste_info);
	}

free_new_htbl:
	mlx5dr_ste_htbl_free(new_htbl);
free_ste_info:
	kfree(ste_info);
	mlx5dr_info(dmn, "Failed creating rehash table\n");
	return NULL;
}
 471
 472static struct mlx5dr_ste_htbl *dr_rule_rehash(struct mlx5dr_rule *rule,
 473                                              struct mlx5dr_rule_rx_tx *nic_rule,
 474                                              struct mlx5dr_ste_htbl *cur_htbl,
 475                                              u8 ste_location,
 476                                              struct list_head *update_list)
 477{
 478        struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
 479        enum mlx5dr_icm_chunk_size new_size;
 480
 481        new_size = mlx5dr_icm_next_higher_chunk(cur_htbl->chunk_size);
 482        new_size = min_t(u32, new_size, dmn->info.max_log_sw_icm_sz);
 483
 484        if (new_size == cur_htbl->chunk_size)
 485                return NULL; /* Skip rehash, we already at the max size */
 486
 487        return dr_rule_rehash_htbl(rule, nic_rule, cur_htbl, ste_location,
 488                                   update_list, new_size);
 489}
 490
 491static struct mlx5dr_ste *
 492dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
 493                         struct mlx5dr_matcher_rx_tx *nic_matcher,
 494                         struct mlx5dr_ste *ste,
 495                         u8 *hw_ste,
 496                         struct list_head *miss_list,
 497                         struct list_head *send_list)
 498{
 499        struct mlx5dr_ste_send_info *ste_info;
 500        struct mlx5dr_ste *new_ste;
 501
 502        ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
 503        if (!ste_info)
 504                return NULL;
 505
 506        new_ste = dr_rule_create_collision_entry(matcher, nic_matcher, hw_ste, ste);
 507        if (!new_ste)
 508                goto free_send_info;
 509
 510        if (dr_rule_append_to_miss_list(new_ste, miss_list, send_list)) {
 511                mlx5dr_dbg(matcher->tbl->dmn, "Failed to update prev miss_list\n");
 512                goto err_exit;
 513        }
 514
 515        mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0, hw_ste,
 516                                                  ste_info, send_list, false);
 517
 518        ste->htbl->ctrl.num_of_collisions++;
 519        ste->htbl->ctrl.num_of_valid_entries++;
 520
 521        return new_ste;
 522
 523err_exit:
 524        mlx5dr_ste_free(new_ste, matcher, nic_matcher);
 525free_send_info:
 526        kfree(ste_info);
 527        return NULL;
 528}
 529
 530static void dr_rule_remove_action_members(struct mlx5dr_rule *rule)
 531{
 532        struct mlx5dr_rule_action_member *action_mem;
 533        struct mlx5dr_rule_action_member *tmp;
 534
 535        list_for_each_entry_safe(action_mem, tmp, &rule->rule_actions_list, list) {
 536                list_del(&action_mem->list);
 537                refcount_dec(&action_mem->action->refcount);
 538                kvfree(action_mem);
 539        }
 540}
 541
 542static int dr_rule_add_action_members(struct mlx5dr_rule *rule,
 543                                      size_t num_actions,
 544                                      struct mlx5dr_action *actions[])
 545{
 546        struct mlx5dr_rule_action_member *action_mem;
 547        int i;
 548
 549        for (i = 0; i < num_actions; i++) {
 550                action_mem = kvzalloc(sizeof(*action_mem), GFP_KERNEL);
 551                if (!action_mem)
 552                        goto free_action_members;
 553
 554                action_mem->action = actions[i];
 555                INIT_LIST_HEAD(&action_mem->list);
 556                list_add_tail(&action_mem->list, &rule->rule_actions_list);
 557                refcount_inc(&action_mem->action->refcount);
 558        }
 559
 560        return 0;
 561
 562free_action_members:
 563        dr_rule_remove_action_members(rule);
 564        return -ENOMEM;
 565}
 566
 567/* While the pointer of ste is no longer valid, like while moving ste to be
 568 * the first in the miss_list, and to be in the origin table,
 569 * all rule-members that are attached to this ste should update their ste member
 570 * to the new pointer
 571 */
 572void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *ste,
 573                                    struct mlx5dr_ste *new_ste)
 574{
 575        struct mlx5dr_rule_member *rule_mem;
 576
 577        list_for_each_entry(rule_mem, &ste->rule_list, use_ste_list)
 578                rule_mem->ste = new_ste;
 579}
 580
 581static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule,
 582                                       struct mlx5dr_rule_rx_tx *nic_rule)
 583{
 584        struct mlx5dr_rule_member *rule_mem;
 585        struct mlx5dr_rule_member *tmp_mem;
 586
 587        if (list_empty(&nic_rule->rule_members_list))
 588                return;
 589        list_for_each_entry_safe(rule_mem, tmp_mem, &nic_rule->rule_members_list, list) {
 590                list_del(&rule_mem->list);
 591                list_del(&rule_mem->use_ste_list);
 592                mlx5dr_ste_put(rule_mem->ste, rule->matcher, nic_rule->nic_matcher);
 593                kvfree(rule_mem);
 594        }
 595}
 596
 597static u16 dr_get_bits_per_mask(u16 byte_mask)
 598{
 599        u16 bits = 0;
 600
 601        while (byte_mask) {
 602                byte_mask = byte_mask & (byte_mask - 1);
 603                bits++;
 604        }
 605
 606        return bits;
 607}
 608
/* Decide whether @htbl should grow to the next chunk size. Growth
 * requires: the domain still permits a larger table, the table is
 * marked growable, and both collisions and non-colliding valid entries
 * crossed the increase threshold. @nic_dmn is currently unused here.
 */
static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
				      struct mlx5dr_domain *dmn,
				      struct mlx5dr_domain_rx_tx *nic_dmn)
{
	struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;

	/* chunk_size is a log2 size; can't grow past the domain max */
	if (dmn->info.max_log_sw_icm_sz <= htbl->chunk_size)
		return false;

	if (!ctrl->may_grow)
		return false;

	/* NOTE(review): this compares a mask bit count against chunk_size,
	 * which elsewhere is treated as a log2 size - confirm the units
	 * are intentional.
	 */
	if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk_size)
		return false;

	if (ctrl->num_of_collisions >= ctrl->increase_threshold &&
	    (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= ctrl->increase_threshold)
		return true;

	return false;
}
 630
 631static int dr_rule_add_member(struct mlx5dr_rule_rx_tx *nic_rule,
 632                              struct mlx5dr_ste *ste)
 633{
 634        struct mlx5dr_rule_member *rule_mem;
 635
 636        rule_mem = kvzalloc(sizeof(*rule_mem), GFP_KERNEL);
 637        if (!rule_mem)
 638                return -ENOMEM;
 639
 640        INIT_LIST_HEAD(&rule_mem->list);
 641        INIT_LIST_HEAD(&rule_mem->use_ste_list);
 642
 643        rule_mem->ste = ste;
 644        list_add_tail(&rule_mem->list, &nic_rule->rule_members_list);
 645
 646        list_add_tail(&rule_mem->use_ste_list, &ste->rule_list);
 647
 648        return 0;
 649}
 650
/* Create the extra "action" STEs needed when a rule's actions do not fit
 * into the match STEs: for each HW STE beyond num_of_builders, allocate a
 * single-entry table, chain the previous STE to it, register it as a rule
 * member and queue its HW write on @send_ste_list.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
				      struct mlx5dr_rule_rx_tx *nic_rule,
				      struct list_head *send_ste_list,
				      struct mlx5dr_ste *last_ste,
				      u8 *hw_ste_arr,
				      u32 new_hw_ste_arr_sz)
{
	struct mlx5dr_matcher_rx_tx *nic_matcher = nic_rule->nic_matcher;
	struct mlx5dr_ste_send_info *ste_info_arr[DR_ACTION_MAX_STES];
	u8 num_of_builders = nic_matcher->num_of_builders;
	struct mlx5dr_matcher *matcher = rule->matcher;
	u8 *curr_hw_ste, *prev_hw_ste;
	struct mlx5dr_ste *action_ste;
	int i, k, ret;

	/* Two cases:
	 * 1. num_of_builders is equal to new_hw_ste_arr_sz, the action in the ste
	 * 2. num_of_builders is less then new_hw_ste_arr_sz, new ste was added
	 *    to support the action.
	 */
	if (num_of_builders == new_hw_ste_arr_sz)
		return 0;

	for (i = num_of_builders, k = 0; i < new_hw_ste_arr_sz; i++, k++) {
		curr_hw_ste = hw_ste_arr + i * DR_STE_SIZE;
		/* NOTE(review): i starts at num_of_builders, so the (i == 0)
		 * arm looks unreachable unless num_of_builders can be 0 -
		 * confirm.
		 */
		prev_hw_ste = (i == 0) ? curr_hw_ste : hw_ste_arr + ((i - 1) * DR_STE_SIZE);
		action_ste = dr_rule_create_collision_htbl(matcher,
							   nic_matcher,
							   curr_hw_ste);
		if (!action_ste)
			return -ENOMEM;

		mlx5dr_ste_get(action_ste);

		/* While free ste we go over the miss list, so add this ste to the list */
		list_add_tail(&action_ste->miss_list_node,
			      mlx5dr_ste_get_miss_list(action_ste));

		ste_info_arr[k] = kzalloc(sizeof(*ste_info_arr[k]),
					  GFP_KERNEL);
		if (!ste_info_arr[k])
			goto err_exit;

		/* Point current ste to the new action */
		mlx5dr_ste_set_hit_addr_by_next_htbl(prev_hw_ste, action_ste->htbl);
		ret = dr_rule_add_member(nic_rule, action_ste);
		if (ret) {
			mlx5dr_dbg(matcher->tbl->dmn, "Failed adding rule member\n");
			goto free_ste_info;
		}
		mlx5dr_send_fill_and_append_ste_send_info(action_ste, DR_STE_SIZE, 0,
							  curr_hw_ste,
							  ste_info_arr[k],
							  send_ste_list, false);
	}

	return 0;

free_ste_info:
	kfree(ste_info_arr[k]);
err_exit:
	mlx5dr_ste_put(action_ste, matcher, nic_matcher);
	return -ENOMEM;
}
 715
/* Initialize an unused hash-table slot @ste for a new rule branch: link
 * it into @miss_list, point its miss path at the matcher's end anchor,
 * create its next hash table and queue the STE write on @send_list.
 *
 * Return: 0 on success, -ENOMEM on failure (all setup is undone).
 */
static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
				      struct mlx5dr_matcher_rx_tx *nic_matcher,
				      struct mlx5dr_ste_htbl *cur_htbl,
				      struct mlx5dr_ste *ste,
				      u8 ste_location,
				      u8 *hw_ste,
				      struct list_head *miss_list,
				      struct list_head *send_list)
{
	struct mlx5dr_ste_send_info *ste_info;

	/* Take ref on table, only on first time this ste is used */
	mlx5dr_htbl_get(cur_htbl);

	/* new entry -> new branch */
	list_add_tail(&ste->miss_list_node, miss_list);

	/* Miss path terminates at the matcher's end anchor */
	mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);

	ste->ste_chain_location = ste_location;

	ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
	if (!ste_info)
		goto clean_ste_setting;

	if (mlx5dr_ste_create_next_htbl(matcher,
					nic_matcher,
					ste,
					hw_ste,
					DR_CHUNK_SIZE_1)) {
		mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
		goto clean_ste_info;
	}

	cur_htbl->ctrl.num_of_valid_entries++;

	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE, 0, hw_ste,
						  ste_info, send_list, false);

	return 0;

clean_ste_info:
	kfree(ste_info);
clean_ste_setting:
	/* Undo the list linkage and the table reference taken above */
	list_del_init(&ste->miss_list_node);
	mlx5dr_htbl_put(cur_htbl);

	return -ENOMEM;
}
 765
/* Find or create the STE for hw_ste at chain position ste_location.
 *
 * The hash bucket in cur_htbl may be:
 *  - unused: claim it as a new branch (dr_rule_handle_empty_entry);
 *  - used, with a matching tag in its miss list: reuse that STE (if this
 *    is also the last location in the chain the whole rule is a duplicate);
 *  - used, no match: optionally rehash the table once, then chain a
 *    collision entry onto the bucket's miss list.
 *
 * When a rehash happens, a reference to the pre-rehash table is handed to
 * the caller via *put_htbl; the caller releases it in dr_rule_create_rule()
 * after the pending STEs have been written. Returns the STE to use for
 * this location, or NULL on failure.
 */
static struct mlx5dr_ste *
dr_rule_handle_ste_branch(struct mlx5dr_rule *rule,
			  struct mlx5dr_rule_rx_tx *nic_rule,
			  struct list_head *send_ste_list,
			  struct mlx5dr_ste_htbl *cur_htbl,
			  u8 *hw_ste,
			  u8 ste_location,
			  struct mlx5dr_ste_htbl **put_htbl)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	struct mlx5dr_ste_htbl *new_htbl;
	struct mlx5dr_ste *matched_ste;
	struct list_head *miss_list;
	bool skip_rehash = false;	/* at most one rehash attempt per insertion */
	struct mlx5dr_ste *ste;
	int index;

	nic_matcher = nic_rule->nic_matcher;
	nic_dmn = nic_matcher->nic_tbl->nic_dmn;

again:
	index = mlx5dr_ste_calc_hash_index(hw_ste, cur_htbl);
	miss_list = &cur_htbl->chunk->miss_list[index];
	ste = &cur_htbl->ste_arr[index];

	if (mlx5dr_ste_is_not_used(ste)) {
		if (dr_rule_handle_empty_entry(matcher, nic_matcher, cur_htbl,
					       ste, ste_location,
					       hw_ste, miss_list,
					       send_ste_list))
			return NULL;
	} else {
		/* Hash table index in use, check if this ste is in the miss list */
		matched_ste = dr_rule_find_ste_in_miss_list(miss_list, hw_ste);
		if (matched_ste) {
			/* If it is last STE in the chain, and has the same tag
			 * it means that all the previous stes are the same,
			 * if so, this rule is duplicated.
			 */
			if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste_location))
				return matched_ste;

			mlx5dr_dbg(dmn, "Duplicate rule inserted\n");
		}

		if (!skip_rehash && dr_rule_need_enlarge_hash(cur_htbl, dmn, nic_dmn)) {
			/* Hash table index in use, try to resize of the hash */
			skip_rehash = true;

			/* Hold the table till we update.
			 * Release in dr_rule_create_rule()
			 */
			*put_htbl = cur_htbl;
			mlx5dr_htbl_get(cur_htbl);

			new_htbl = dr_rule_rehash(rule, nic_rule, cur_htbl,
						  ste_location, send_ste_list);
			if (!new_htbl) {
				/* Rehash failed: keep the old table and fall
				 * back to collision chaining on the retry.
				 */
				mlx5dr_htbl_put(cur_htbl);
				mlx5dr_err(dmn, "Failed creating rehash table, htbl-log_size: %d\n",
					   cur_htbl->chunk_size);
			} else {
				cur_htbl = new_htbl;
			}
			goto again;
		} else {
			/* Hash table index in use, add another collision (miss) */
			ste = dr_rule_handle_collision(matcher,
						       nic_matcher,
						       ste,
						       hw_ste,
						       miss_list,
						       send_ste_list);
			if (!ste) {
				mlx5dr_dbg(dmn, "failed adding collision entry, index: %d\n",
					   index);
				return NULL;
			}
		}
	}
	return ste;
}
 851
 852static bool dr_rule_cmp_value_to_mask(u8 *mask, u8 *value,
 853                                      u32 s_idx, u32 e_idx)
 854{
 855        u32 i;
 856
 857        for (i = s_idx; i < e_idx; i++) {
 858                if (value[i] & ~mask[i]) {
 859                        pr_info("Rule parameters contains a value not specified by mask\n");
 860                        return false;
 861                }
 862        }
 863        return true;
 864}
 865
 866static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
 867                           struct mlx5dr_match_parameters *value,
 868                           struct mlx5dr_match_param *param)
 869{
 870        u8 match_criteria = matcher->match_criteria;
 871        size_t value_size = value->match_sz;
 872        u8 *mask_p = (u8 *)&matcher->mask;
 873        u8 *param_p = (u8 *)param;
 874        u32 s_idx, e_idx;
 875
 876        if (!value_size ||
 877            (value_size > sizeof(struct mlx5dr_match_param) ||
 878             (value_size % sizeof(u32)))) {
 879                mlx5dr_err(matcher->tbl->dmn, "Rule parameters length is incorrect\n");
 880                return false;
 881        }
 882
 883        mlx5dr_ste_copy_param(matcher->match_criteria, param, value);
 884
 885        if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
 886                s_idx = offsetof(struct mlx5dr_match_param, outer);
 887                e_idx = min(s_idx + sizeof(param->outer), value_size);
 888
 889                if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
 890                        mlx5dr_err(matcher->tbl->dmn, "Rule outer parameters contains a value not specified by mask\n");
 891                        return false;
 892                }
 893        }
 894
 895        if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
 896                s_idx = offsetof(struct mlx5dr_match_param, misc);
 897                e_idx = min(s_idx + sizeof(param->misc), value_size);
 898
 899                if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
 900                        mlx5dr_err(matcher->tbl->dmn, "Rule misc parameters contains a value not specified by mask\n");
 901                        return false;
 902                }
 903        }
 904
 905        if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
 906                s_idx = offsetof(struct mlx5dr_match_param, inner);
 907                e_idx = min(s_idx + sizeof(param->inner), value_size);
 908
 909                if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
 910                        mlx5dr_err(matcher->tbl->dmn, "Rule inner parameters contains a value not specified by mask\n");
 911                        return false;
 912                }
 913        }
 914
 915        if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
 916                s_idx = offsetof(struct mlx5dr_match_param, misc2);
 917                e_idx = min(s_idx + sizeof(param->misc2), value_size);
 918
 919                if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
 920                        mlx5dr_err(matcher->tbl->dmn, "Rule misc2 parameters contains a value not specified by mask\n");
 921                        return false;
 922                }
 923        }
 924
 925        if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
 926                s_idx = offsetof(struct mlx5dr_match_param, misc3);
 927                e_idx = min(s_idx + sizeof(param->misc3), value_size);
 928
 929                if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
 930                        mlx5dr_err(matcher->tbl->dmn, "Rule misc3 parameters contains a value not specified by mask\n");
 931                        return false;
 932                }
 933        }
 934        return true;
 935}
 936
 937static int dr_rule_destroy_rule_nic(struct mlx5dr_rule *rule,
 938                                    struct mlx5dr_rule_rx_tx *nic_rule)
 939{
 940        mlx5dr_domain_nic_lock(nic_rule->nic_matcher->nic_tbl->nic_dmn);
 941        dr_rule_clean_rule_members(rule, nic_rule);
 942        mlx5dr_domain_nic_unlock(nic_rule->nic_matcher->nic_tbl->nic_dmn);
 943
 944        return 0;
 945}
 946
 947static int dr_rule_destroy_rule_fdb(struct mlx5dr_rule *rule)
 948{
 949        dr_rule_destroy_rule_nic(rule, &rule->rx);
 950        dr_rule_destroy_rule_nic(rule, &rule->tx);
 951        return 0;
 952}
 953
 954static int dr_rule_destroy_rule(struct mlx5dr_rule *rule)
 955{
 956        struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
 957
 958        switch (dmn->type) {
 959        case MLX5DR_DOMAIN_TYPE_NIC_RX:
 960                dr_rule_destroy_rule_nic(rule, &rule->rx);
 961                break;
 962        case MLX5DR_DOMAIN_TYPE_NIC_TX:
 963                dr_rule_destroy_rule_nic(rule, &rule->tx);
 964                break;
 965        case MLX5DR_DOMAIN_TYPE_FDB:
 966                dr_rule_destroy_rule_fdb(rule);
 967                break;
 968        default:
 969                return -EINVAL;
 970        }
 971
 972        dr_rule_remove_action_members(rule);
 973        kfree(rule);
 974        return 0;
 975}
 976
 977static enum mlx5dr_ipv dr_rule_get_ipv(struct mlx5dr_match_spec *spec)
 978{
 979        if (spec->ip_version == 6 || spec->ethertype == ETH_P_IPV6)
 980                return DR_RULE_IPV6;
 981
 982        return DR_RULE_IPV4;
 983}
 984
 985static bool dr_rule_skip(enum mlx5dr_domain_type domain,
 986                         enum mlx5dr_ste_entry_type ste_type,
 987                         struct mlx5dr_match_param *mask,
 988                         struct mlx5dr_match_param *value,
 989                         u32 flow_source)
 990{
 991        bool rx = ste_type == MLX5DR_STE_TYPE_RX;
 992
 993        if (domain != MLX5DR_DOMAIN_TYPE_FDB)
 994                return false;
 995
 996        if (mask->misc.source_port) {
 997                if (rx && value->misc.source_port != WIRE_PORT)
 998                        return true;
 999
1000                if (!rx && value->misc.source_port == WIRE_PORT)
1001                        return true;
1002        }
1003
1004        if (rx && flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT)
1005                return true;
1006
1007        if (!rx && flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK)
1008                return true;
1009
1010        return false;
1011}
1012
/* Insert one direction (RX or TX) of a rule.
 *
 * Builds the full STE chain for the match params and actions into
 * hw_ste_arr, then walks the nic_matcher hash tables creating/reusing an
 * STE per chain location, appends the action STEs, and finally writes all
 * pending entries to HW in one batch (dr_rule_send_update_list).
 *
 * Runs under the per-NIC-domain lock. Returns 0 on success - including
 * when dr_rule_skip() decides this direction is not needed - or a
 * negative errno; on failure all partially-created members and queued
 * send-infos are cleaned up.
 */
static int
dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
			struct mlx5dr_rule_rx_tx *nic_rule,
			struct mlx5dr_match_param *param,
			size_t num_actions,
			struct mlx5dr_action *actions[])
{
	struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	struct mlx5dr_ste_htbl *htbl = NULL;	/* held by a rehash in dr_rule_handle_ste_branch() */
	struct mlx5dr_ste_htbl *cur_htbl;
	struct mlx5dr_ste *ste = NULL;
	LIST_HEAD(send_ste_list);	/* STEs pending the batched HW write */
	u8 *hw_ste_arr = NULL;
	u32 new_hw_ste_arr_sz;
	int ret, i;

	nic_matcher = nic_rule->nic_matcher;
	nic_dmn = nic_matcher->nic_tbl->nic_dmn;

	INIT_LIST_HEAD(&nic_rule->rule_members_list);

	/* A direction the traffic can never hit needs no STEs at all */
	if (dr_rule_skip(dmn->type, nic_dmn->ste_type, &matcher->mask, param,
			 rule->flow_source))
		return 0;

	/* Scratch buffer large enough for match STEs plus action STEs */
	hw_ste_arr = kzalloc(DR_RULE_MAX_STE_CHAIN * DR_STE_SIZE, GFP_KERNEL);
	if (!hw_ste_arr)
		return -ENOMEM;

	mlx5dr_domain_nic_lock(nic_dmn);

	ret = mlx5dr_matcher_select_builders(matcher,
					     nic_matcher,
					     dr_rule_get_ipv(&param->outer),
					     dr_rule_get_ipv(&param->inner));
	if (ret)
		goto free_hw_ste;

	/* Set the tag values inside the ste array */
	ret = mlx5dr_ste_build_ste_arr(matcher, nic_matcher, param, hw_ste_arr);
	if (ret)
		goto free_hw_ste;

	/* Set the actions values/addresses inside the ste array */
	ret = mlx5dr_actions_build_ste_arr(matcher, nic_matcher, actions,
					   num_actions, hw_ste_arr,
					   &new_hw_ste_arr_sz);
	if (ret)
		goto free_hw_ste;

	cur_htbl = nic_matcher->s_htbl;

	/* Go over the array of STEs, and build dr_ste accordingly.
	 * The loop is over only the builders which are equal or less to the
	 * number of stes, in case we have actions that lives in other stes.
	 */
	for (i = 0; i < nic_matcher->num_of_builders; i++) {
		/* Calculate CRC and keep new ste entry */
		u8 *cur_hw_ste_ent = hw_ste_arr + (i * DR_STE_SIZE);

		ste = dr_rule_handle_ste_branch(rule,
						nic_rule,
						&send_ste_list,
						cur_htbl,
						cur_hw_ste_ent,
						i + 1,	/* ste_location is 1-based */
						&htbl);
		if (!ste) {
			mlx5dr_err(dmn, "Failed creating next branch\n");
			ret = -ENOENT;
			goto free_rule;
		}

		cur_htbl = ste->next_htbl;

		/* Keep all STEs in the rule struct */
		ret = dr_rule_add_member(nic_rule, ste);
		if (ret) {
			mlx5dr_dbg(dmn, "Failed adding rule member index %d\n", i);
			goto free_ste;
		}

		mlx5dr_ste_get(ste);
	}

	/* Connect actions */
	ret = dr_rule_handle_action_stes(rule, nic_rule, &send_ste_list,
					 ste, hw_ste_arr, new_hw_ste_arr_sz);
	if (ret) {
		mlx5dr_dbg(dmn, "Failed apply actions\n");
		goto free_rule;
	}
	/* Batched write of every queued STE to HW */
	ret = dr_rule_send_update_list(&send_ste_list, dmn, true);
	if (ret) {
		mlx5dr_err(dmn, "Failed sending ste!\n");
		goto free_rule;
	}

	/* Release the pre-rehash table held in dr_rule_handle_ste_branch() */
	if (htbl)
		mlx5dr_htbl_put(htbl);

	mlx5dr_domain_nic_unlock(nic_dmn);

	kfree(hw_ste_arr);

	return 0;

free_ste:
	mlx5dr_ste_put(ste, matcher, nic_matcher);
free_rule:
	dr_rule_clean_rule_members(rule, nic_rule);
	/* Clean all ste_info's */
	list_for_each_entry_safe(ste_info, tmp_ste_info, &send_ste_list, send_list) {
		list_del(&ste_info->send_list);
		kfree(ste_info);
	}
free_hw_ste:
	mlx5dr_domain_nic_unlock(nic_dmn);
	kfree(hw_ste_arr);
	return ret;
}
1138
1139static int
1140dr_rule_create_rule_fdb(struct mlx5dr_rule *rule,
1141                        struct mlx5dr_match_param *param,
1142                        size_t num_actions,
1143                        struct mlx5dr_action *actions[])
1144{
1145        struct mlx5dr_match_param copy_param = {};
1146        int ret;
1147
1148        /* Copy match_param since they will be consumed during the first
1149         * nic_rule insertion.
1150         */
1151        memcpy(&copy_param, param, sizeof(struct mlx5dr_match_param));
1152
1153        ret = dr_rule_create_rule_nic(rule, &rule->rx, param,
1154                                      num_actions, actions);
1155        if (ret)
1156                return ret;
1157
1158        ret = dr_rule_create_rule_nic(rule, &rule->tx, &copy_param,
1159                                      num_actions, actions);
1160        if (ret)
1161                goto destroy_rule_nic_rx;
1162
1163        return 0;
1164
1165destroy_rule_nic_rx:
1166        dr_rule_destroy_rule_nic(rule, &rule->rx);
1167        return ret;
1168}
1169
1170static struct mlx5dr_rule *
1171dr_rule_create_rule(struct mlx5dr_matcher *matcher,
1172                    struct mlx5dr_match_parameters *value,
1173                    size_t num_actions,
1174                    struct mlx5dr_action *actions[],
1175                    u32 flow_source)
1176{
1177        struct mlx5dr_domain *dmn = matcher->tbl->dmn;
1178        struct mlx5dr_match_param param = {};
1179        struct mlx5dr_rule *rule;
1180        int ret;
1181
1182        if (!dr_rule_verify(matcher, value, &param))
1183                return NULL;
1184
1185        rule = kzalloc(sizeof(*rule), GFP_KERNEL);
1186        if (!rule)
1187                return NULL;
1188
1189        rule->matcher = matcher;
1190        rule->flow_source = flow_source;
1191        INIT_LIST_HEAD(&rule->rule_actions_list);
1192
1193        ret = dr_rule_add_action_members(rule, num_actions, actions);
1194        if (ret)
1195                goto free_rule;
1196
1197        switch (dmn->type) {
1198        case MLX5DR_DOMAIN_TYPE_NIC_RX:
1199                rule->rx.nic_matcher = &matcher->rx;
1200                ret = dr_rule_create_rule_nic(rule, &rule->rx, &param,
1201                                              num_actions, actions);
1202                break;
1203        case MLX5DR_DOMAIN_TYPE_NIC_TX:
1204                rule->tx.nic_matcher = &matcher->tx;
1205                ret = dr_rule_create_rule_nic(rule, &rule->tx, &param,
1206                                              num_actions, actions);
1207                break;
1208        case MLX5DR_DOMAIN_TYPE_FDB:
1209                rule->rx.nic_matcher = &matcher->rx;
1210                rule->tx.nic_matcher = &matcher->tx;
1211                ret = dr_rule_create_rule_fdb(rule, &param,
1212                                              num_actions, actions);
1213                break;
1214        default:
1215                ret = -EINVAL;
1216                break;
1217        }
1218
1219        if (ret)
1220                goto remove_action_members;
1221
1222        return rule;
1223
1224remove_action_members:
1225        dr_rule_remove_action_members(rule);
1226free_rule:
1227        kfree(rule);
1228        mlx5dr_err(dmn, "Failed creating rule\n");
1229        return NULL;
1230}
1231
1232struct mlx5dr_rule *mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
1233                                       struct mlx5dr_match_parameters *value,
1234                                       size_t num_actions,
1235                                       struct mlx5dr_action *actions[],
1236                                       u32 flow_source)
1237{
1238        struct mlx5dr_rule *rule;
1239
1240        refcount_inc(&matcher->refcount);
1241
1242        rule = dr_rule_create_rule(matcher, value, num_actions, actions, flow_source);
1243        if (!rule)
1244                refcount_dec(&matcher->refcount);
1245
1246        return rule;
1247}
1248
1249int mlx5dr_rule_destroy(struct mlx5dr_rule *rule)
1250{
1251        struct mlx5dr_matcher *matcher = rule->matcher;
1252        int ret;
1253
1254        ret = dr_rule_destroy_rule(rule);
1255        if (!ret)
1256                refcount_dec(&matcher->refcount);
1257
1258        return ret;
1259}
1260