/* linux/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c */
   1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
   2/* Copyright (c) 2019 Mellanox Technologies. */
   3
   4#include <linux/types.h>
   5#include <linux/crc32.h>
   6#include "dr_ste.h"
   7
/* Software view of a single HW STE: control area, match tag, and the
 * tag's bit mask, laid out back to back.
 */
struct dr_hw_ste_format {
	u8 ctrl[DR_STE_SIZE_CTRL];
	u8 tag[DR_STE_SIZE_TAG];
	u8 mask[DR_STE_SIZE_MASK];
};
  13
  14static u32 dr_ste_crc32_calc(const void *input_data, size_t length)
  15{
  16        u32 crc = crc32(0, input_data, length);
  17
  18        return (__force u32)htonl(crc);
  19}
  20
/* TTL checksum recalculation is supported on steering formats newer
 * than ConnectX-5.
 */
bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps)
{
	return caps->sw_format_ver > MLX5_STEERING_FORMAT_CONNECTX_5;
}
  25
  26u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
  27{
  28        struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
  29        u8 masked[DR_STE_SIZE_TAG] = {};
  30        u32 crc32, index;
  31        u16 bit;
  32        int i;
  33
  34        /* Don't calculate CRC if the result is predicted */
  35        if (htbl->chunk->num_of_entries == 1 || htbl->byte_mask == 0)
  36                return 0;
  37
  38        /* Mask tag using byte mask, bit per byte */
  39        bit = 1 << (DR_STE_SIZE_TAG - 1);
  40        for (i = 0; i < DR_STE_SIZE_TAG; i++) {
  41                if (htbl->byte_mask & bit)
  42                        masked[i] = hw_ste->tag[i];
  43
  44                bit = bit >> 1;
  45        }
  46
  47        crc32 = dr_ste_crc32_calc(masked, DR_STE_SIZE_TAG);
  48        index = crc32 & (htbl->chunk->num_of_entries - 1);
  49
  50        return index;
  51}
  52
  53u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
  54{
  55        u16 byte_mask = 0;
  56        int i;
  57
  58        for (i = 0; i < DR_STE_SIZE_MASK; i++) {
  59                byte_mask = byte_mask << 1;
  60                if (bit_mask[i] == 0xff)
  61                        byte_mask |= 1;
  62        }
  63        return byte_mask;
  64}
  65
  66static u8 *dr_ste_get_tag(u8 *hw_ste_p)
  67{
  68        struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
  69
  70        return hw_ste->tag;
  71}
  72
  73void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask)
  74{
  75        struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
  76
  77        memcpy(hw_ste->mask, bit_mask, DR_STE_SIZE_MASK);
  78}
  79
  80static void dr_ste_set_always_hit(struct dr_hw_ste_format *hw_ste)
  81{
  82        memset(&hw_ste->tag, 0, sizeof(hw_ste->tag));
  83        memset(&hw_ste->mask, 0, sizeof(hw_ste->mask));
  84}
  85
/* An unmatchable tag value with an empty mask - no packet can hit it,
 * so lookups always take the miss path.
 */
static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste)
{
	hw_ste->tag[0] = 0xdc;
	hw_ste->mask[0] = 0;
}
  91
/* Set the STE's miss address via the format-specific STE context */
void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
			      u8 *hw_ste_p, u64 miss_addr)
{
	ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
}
  97
  98static void dr_ste_always_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
  99                                    struct mlx5dr_ste *ste, u64 miss_addr)
 100{
 101        u8 *hw_ste_p = ste->hw_ste;
 102
 103        ste_ctx->set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
 104        ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
 105        dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
 106}
 107
/* Set the STE's hit (next table) address via the STE context */
void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
			     u8 *hw_ste, u64 icm_addr, u32 ht_size)
{
	ste_ctx->set_hit_addr(hw_ste, icm_addr, ht_size);
}
 113
 114u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
 115{
 116        u32 index = ste - ste->htbl->ste_arr;
 117
 118        return ste->htbl->chunk->icm_addr + DR_STE_SIZE * index;
 119}
 120
 121u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste)
 122{
 123        u32 index = ste - ste->htbl->ste_arr;
 124
 125        return ste->htbl->chunk->mr_addr + DR_STE_SIZE * index;
 126}
 127
 128struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste)
 129{
 130        u32 index = ste - ste->htbl->ste_arr;
 131
 132        return &ste->htbl->miss_list[index];
 133}
 134
 135static void dr_ste_always_hit_htbl(struct mlx5dr_ste_ctx *ste_ctx,
 136                                   struct mlx5dr_ste *ste,
 137                                   struct mlx5dr_ste_htbl *next_htbl)
 138{
 139        struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
 140        u8 *hw_ste = ste->hw_ste;
 141
 142        ste_ctx->set_byte_mask(hw_ste, next_htbl->byte_mask);
 143        ste_ctx->set_next_lu_type(hw_ste, next_htbl->lu_type);
 144        ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
 145
 146        dr_ste_set_always_hit((struct dr_hw_ste_format *)ste->hw_ste);
 147}
 148
/* True when @ste_location is the last builder position in the matcher's
 * STE chain.
 */
bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
				u8 ste_location)
{
	return ste_location == nic_matcher->num_of_builders;
}
 154
/* Replace relevant fields, except of:
 * htbl - keep the origin htbl
 * miss_list + list - already took the src from the list.
 * icm_addr/mr_addr - depends on the hosting table.
 *
 * Before:
 * | a | -> | b | -> | c | ->
 *
 * After:
 * | a | -> | c | ->
 * While the data that was in b copied to a.
 */
static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
{
	/* Copy only the reduced STE area; the mask lives in the table, not
	 * per entry.
	 */
	memcpy(dst->hw_ste, src->hw_ste, DR_STE_SIZE_REDUCED);
	dst->next_htbl = src->next_htbl;
	if (dst->next_htbl)
		dst->next_htbl->pointing_ste = dst;

	dst->refcount = src->refcount;

	/* Take over src's rule list; dst's head is re-initialized first */
	INIT_LIST_HEAD(&dst->rule_list);
	list_splice_tail_init(&src->rule_list, &dst->rule_list);
}
 179
/* Free ste which is the head and the only one in miss_list */
static void
dr_ste_remove_head_ste(struct mlx5dr_ste_ctx *ste_ctx,
		       struct mlx5dr_ste *ste,
		       struct mlx5dr_matcher_rx_tx *nic_matcher,
		       struct mlx5dr_ste_send_info *ste_info_head,
		       struct list_head *send_ste_list,
		       struct mlx5dr_ste_htbl *stats_tbl)
{
	u8 tmp_data_ste[DR_STE_SIZE] = {};
	struct mlx5dr_ste tmp_ste = {};
	u64 miss_addr;

	tmp_ste.hw_ste = tmp_data_ste;

	/* Use temp ste because dr_ste_always_miss_addr
	 * touches bit_mask area which doesn't exist at ste->hw_ste.
	 */
	memcpy(tmp_ste.hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
	/* Misses are redirected to the matcher's end-anchor table */
	miss_addr = nic_matcher->e_anchor->chunk->icm_addr;
	dr_ste_always_miss_addr(ste_ctx, &tmp_ste, miss_addr);
	/* Copy the reduced part back to the real (mask-less) entry */
	memcpy(ste->hw_ste, tmp_ste.hw_ste, DR_STE_SIZE_REDUCED);

	list_del_init(&ste->miss_list_node);

	/* Write full STE size in order to have "always_miss" */
	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
						  0, tmp_data_ste,
						  ste_info_head,
						  send_ste_list,
						  true /* Copy data */);

	stats_tbl->ctrl.num_of_valid_entries--;
}
 214
/* Free ste which is the head but NOT the only one in miss_list:
 * |_ste_| --> |_next_ste_| -->|__| -->|__| -->/0
 */
static void
dr_ste_replace_head_ste(struct mlx5dr_matcher_rx_tx *nic_matcher,
			struct mlx5dr_ste *ste,
			struct mlx5dr_ste *next_ste,
			struct mlx5dr_ste_send_info *ste_info_head,
			struct list_head *send_ste_list,
			struct mlx5dr_ste_htbl *stats_tbl)

{
	struct mlx5dr_ste_htbl *next_miss_htbl;
	u8 hw_ste[DR_STE_SIZE] = {};
	int sb_idx;

	next_miss_htbl = next_ste->htbl;

	/* Remove from the miss_list the next_ste before copy */
	list_del_init(&next_ste->miss_list_node);

	/* All rule-members that use next_ste should know about that */
	mlx5dr_rule_update_rule_member(next_ste, ste);

	/* Move data from next into ste */
	dr_ste_replace(ste, next_ste);

	/* Copy all 64 hw_ste bytes */
	memcpy(hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
	/* Re-apply this chain location's bit mask on top of the reduced
	 * copy so the full-sized STE sent to HW is complete.
	 */
	sb_idx = ste->ste_chain_location - 1;
	mlx5dr_ste_set_bit_mask(hw_ste,
				nic_matcher->ste_builder[sb_idx].bit_mask);

	/* Del the htbl that contains the next_ste.
	 * The origin htbl stay with the same number of entries.
	 */
	mlx5dr_htbl_put(next_miss_htbl);

	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
						  0, hw_ste,
						  ste_info_head,
						  send_ste_list,
						  true /* Copy data */);

	stats_tbl->ctrl.num_of_collisions--;
	stats_tbl->ctrl.num_of_valid_entries--;
}
 262
/* Free ste that is located in the middle of the miss list:
 * |__| -->|_prev_ste_|->|_ste_|-->|_next_ste_|
 */
static void dr_ste_remove_middle_ste(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste *ste,
				     struct mlx5dr_ste_send_info *ste_info,
				     struct list_head *send_ste_list,
				     struct mlx5dr_ste_htbl *stats_tbl)
{
	struct mlx5dr_ste *prev_ste;
	u64 miss_addr;

	prev_ste = list_prev_entry(ste, miss_list_node);
	if (WARN_ON(!prev_ste))
		return;

	/* Bridge over the removed entry: prev's miss now points where
	 * ste's miss used to point.
	 */
	miss_addr = ste_ctx->get_miss_addr(ste->hw_ste);
	ste_ctx->set_miss_addr(prev_ste->hw_ste, miss_addr);

	/* Only the control area of prev changed, so send just that part */
	mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_CTRL, 0,
						  prev_ste->hw_ste, ste_info,
						  send_ste_list, true /* Copy data*/);

	list_del_init(&ste->miss_list_node);

	stats_tbl->ctrl.num_of_valid_entries--;
	stats_tbl->ctrl.num_of_collisions--;
}
 291
/* Release @ste from its miss list, queue the required HW updates, send
 * them, and drop the reference on the hosting table when appropriate.
 */
void mlx5dr_ste_free(struct mlx5dr_ste *ste,
		     struct mlx5dr_matcher *matcher,
		     struct mlx5dr_matcher_rx_tx *nic_matcher)
{
	struct mlx5dr_ste_send_info *cur_ste_info, *tmp_ste_info;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_send_info ste_info_head;
	struct mlx5dr_ste *next_ste, *first_ste;
	bool put_on_origin_table = true;
	struct mlx5dr_ste_htbl *stats_tbl;
	LIST_HEAD(send_ste_list);

	first_ste = list_first_entry(mlx5dr_ste_get_miss_list(ste),
				     struct mlx5dr_ste, miss_list_node);
	/* Collision/valid-entry counters are kept on the head's table */
	stats_tbl = first_ste->htbl;

	/* Two options:
	 * 1. ste is head:
	 *      a. head ste is the only ste in the miss list
	 *      b. head ste is not the only ste in the miss-list
	 * 2. ste is not head
	 */
	if (first_ste == ste) { /* Ste is the head */
		struct mlx5dr_ste *last_ste;

		last_ste = list_last_entry(mlx5dr_ste_get_miss_list(ste),
					   struct mlx5dr_ste, miss_list_node);
		if (last_ste == first_ste)
			next_ste = NULL;
		else
			next_ste = list_next_entry(ste, miss_list_node);

		if (!next_ste) {
			/* One and only entry in the list */
			dr_ste_remove_head_ste(ste_ctx, ste,
					       nic_matcher,
					       &ste_info_head,
					       &send_ste_list,
					       stats_tbl);
		} else {
			/* First but not only entry in the list */
			dr_ste_replace_head_ste(nic_matcher, ste,
						next_ste, &ste_info_head,
						&send_ste_list, stats_tbl);
			/* The successor's table was put instead; keep the
			 * reference on ste's own table.
			 */
			put_on_origin_table = false;
		}
	} else { /* Ste in the middle of the list */
		dr_ste_remove_middle_ste(ste_ctx, ste,
					 &ste_info_head, &send_ste_list,
					 stats_tbl);
	}

	/* Update HW */
	list_for_each_entry_safe(cur_ste_info, tmp_ste_info,
				 &send_ste_list, send_list) {
		list_del(&cur_ste_info->send_list);
		mlx5dr_send_postsend_ste(dmn, cur_ste_info->ste,
					 cur_ste_info->data, cur_ste_info->size,
					 cur_ste_info->offset);
	}

	if (put_on_origin_table)
		mlx5dr_htbl_put(ste->htbl);
}
 357
 358bool mlx5dr_ste_equal_tag(void *src, void *dst)
 359{
 360        struct dr_hw_ste_format *s_hw_ste = (struct dr_hw_ste_format *)src;
 361        struct dr_hw_ste_format *d_hw_ste = (struct dr_hw_ste_format *)dst;
 362
 363        return !memcmp(s_hw_ste->tag, d_hw_ste->tag, DR_STE_SIZE_TAG);
 364}
 365
 366void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
 367                                          u8 *hw_ste,
 368                                          struct mlx5dr_ste_htbl *next_htbl)
 369{
 370        struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
 371
 372        ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
 373}
 374
 375void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
 376                                     u8 *hw_ste_p, u32 ste_size)
 377{
 378        if (ste_ctx->prepare_for_postsend)
 379                ste_ctx->prepare_for_postsend(hw_ste_p, ste_size);
 380}
 381
 382/* Init one ste as a pattern for ste data array */
 383void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
 384                                  u16 gvmi,
 385                                  struct mlx5dr_domain_rx_tx *nic_dmn,
 386                                  struct mlx5dr_ste_htbl *htbl,
 387                                  u8 *formatted_ste,
 388                                  struct mlx5dr_htbl_connect_info *connect_info)
 389{
 390        struct mlx5dr_ste ste = {};
 391
 392        ste_ctx->ste_init(formatted_ste, htbl->lu_type, nic_dmn->ste_type, gvmi);
 393        ste.hw_ste = formatted_ste;
 394
 395        if (connect_info->type == CONNECT_HIT)
 396                dr_ste_always_hit_htbl(ste_ctx, &ste, connect_info->hit_next_htbl);
 397        else
 398                dr_ste_always_miss_addr(ste_ctx, &ste, connect_info->miss_icm_addr);
 399}
 400
 401int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
 402                                      struct mlx5dr_domain_rx_tx *nic_dmn,
 403                                      struct mlx5dr_ste_htbl *htbl,
 404                                      struct mlx5dr_htbl_connect_info *connect_info,
 405                                      bool update_hw_ste)
 406{
 407        u8 formatted_ste[DR_STE_SIZE] = {};
 408
 409        mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
 410                                     dmn->info.caps.gvmi,
 411                                     nic_dmn,
 412                                     htbl,
 413                                     formatted_ste,
 414                                     connect_info);
 415
 416        return mlx5dr_send_postsend_formatted_htbl(dmn, htbl, formatted_ste, update_hw_ste);
 417}
 418
/* Allocate and connect the next hash table in the rule's STE chain,
 * unless @ste is already the last STE of the rule (then nothing to do).
 * Returns 0 on success, -ENOMEM on allocation failure, -ENOENT when
 * writing the new table to HW fails.
 */
int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
				struct mlx5dr_matcher_rx_tx *nic_matcher,
				struct mlx5dr_ste *ste,
				u8 *cur_hw_ste,
				enum mlx5dr_icm_chunk_size log_table_size)
{
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_htbl_connect_info info;
	struct mlx5dr_ste_htbl *next_htbl;

	if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
		u16 next_lu_type;
		u16 byte_mask;

		/* The current STE already encodes what the next table must
		 * look up.
		 */
		next_lu_type = ste_ctx->get_next_lu_type(cur_hw_ste);
		byte_mask = ste_ctx->get_byte_mask(cur_hw_ste);

		next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
						  log_table_size,
						  next_lu_type,
						  byte_mask);
		if (!next_htbl) {
			mlx5dr_dbg(dmn, "Failed allocating table\n");
			return -ENOMEM;
		}

		/* Write new table to HW */
		info.type = CONNECT_MISS;
		/* New table's misses fall through to the end anchor */
		info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
		if (mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, next_htbl,
						      &info, false)) {
			mlx5dr_info(dmn, "Failed writing table to HW\n");
			goto free_table;
		}

		/* Only after HW knows the table, link it from the chain */
		mlx5dr_ste_set_hit_addr_by_next_htbl(ste_ctx,
						     cur_hw_ste, next_htbl);
		ste->next_htbl = next_htbl;
		next_htbl->pointing_ste = ste;
	}

	return 0;

free_table:
	mlx5dr_ste_htbl_free(next_htbl);
	return -ENOENT;
}
 468
 469static void dr_ste_set_ctrl(struct mlx5dr_ste_htbl *htbl)
 470{
 471        struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;
 472        int num_of_entries;
 473
 474        htbl->ctrl.may_grow = true;
 475
 476        if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask)
 477                htbl->ctrl.may_grow = false;
 478
 479        /* Threshold is 50%, one is added to table of size 1 */
 480        num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk_size);
 481        ctrl->increase_threshold = (num_of_entries + 1) / 2;
 482}
 483
/* Allocate a hash table backed by an ICM chunk of @chunk_size entries
 * and initialize every STE slot and its miss list.
 * Returns NULL on allocation failure.
 */
struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
					      enum mlx5dr_icm_chunk_size chunk_size,
					      u16 lu_type, u16 byte_mask)
{
	struct mlx5dr_icm_chunk *chunk;
	struct mlx5dr_ste_htbl *htbl;
	int i;

	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
	if (!htbl)
		return NULL;

	chunk = mlx5dr_icm_alloc_chunk(pool, chunk_size);
	if (!chunk)
		goto out_free_htbl;

	htbl->chunk = chunk;
	htbl->lu_type = lu_type;
	htbl->byte_mask = byte_mask;
	htbl->ste_arr = chunk->ste_arr;
	htbl->hw_ste_arr = chunk->hw_ste_arr;
	htbl->miss_list = chunk->miss_list;
	htbl->refcount = 0;

	for (i = 0; i < chunk->num_of_entries; i++) {
		struct mlx5dr_ste *ste = &htbl->ste_arr[i];

		/* Each SW entry maps to a reduced-size (mask-less) HW STE */
		ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
		ste->htbl = htbl;
		ste->refcount = 0;
		INIT_LIST_HEAD(&ste->miss_list_node);
		INIT_LIST_HEAD(&htbl->miss_list[i]);
		INIT_LIST_HEAD(&ste->rule_list);
	}

	htbl->chunk_size = chunk_size;
	dr_ste_set_ctrl(htbl);
	return htbl;

out_free_htbl:
	kfree(htbl);
	return NULL;
}
 527
 528int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
 529{
 530        if (htbl->refcount)
 531                return -EBUSY;
 532
 533        mlx5dr_icm_free_chunk(htbl->chunk);
 534        kfree(htbl);
 535        return 0;
 536}
 537
/* Fill TX actions into the STE array via the format-specific context;
 * @added_stes returns how many extra STEs the actions required.
 */
void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_domain *dmn,
			       u8 *action_type_set,
			       u8 *hw_ste_arr,
			       struct mlx5dr_ste_actions_attr *attr,
			       u32 *added_stes)
{
	ste_ctx->set_actions_tx(dmn, action_type_set, hw_ste_arr,
				attr, added_stes);
}
 548
/* Fill RX actions into the STE array via the format-specific context;
 * @added_stes returns how many extra STEs the actions required.
 */
void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_domain *dmn,
			       u8 *action_type_set,
			       u8 *hw_ste_arr,
			       struct mlx5dr_ste_actions_attr *attr,
			       u32 *added_stes)
{
	ste_ctx->set_actions_rx(dmn, action_type_set, hw_ste_arr,
				attr, added_stes);
}
 559
 560const struct mlx5dr_ste_action_modify_field *
 561mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field)
 562{
 563        const struct mlx5dr_ste_action_modify_field *hw_field;
 564
 565        if (sw_field >= ste_ctx->modify_field_arr_sz)
 566                return NULL;
 567
 568        hw_field = &ste_ctx->modify_field_arr[sw_field];
 569        if (!hw_field->end && !hw_field->start)
 570                return NULL;
 571
 572        return hw_field;
 573}
 574
/* Encode a "set field" modify-header action into @hw_action */
void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx,
			       __be64 *hw_action,
			       u8 hw_field,
			       u8 shifter,
			       u8 length,
			       u32 data)
{
	ste_ctx->set_action_set((u8 *)hw_action,
				hw_field, shifter, length, data);
}
 585
/* Encode an "add to field" modify-header action into @hw_action */
void mlx5dr_ste_set_action_add(struct mlx5dr_ste_ctx *ste_ctx,
			       __be64 *hw_action,
			       u8 hw_field,
			       u8 shifter,
			       u8 length,
			       u32 data)
{
	ste_ctx->set_action_add((u8 *)hw_action,
				hw_field, shifter, length, data);
}
 596
/* Encode a "copy field to field" modify-header action into @hw_action */
void mlx5dr_ste_set_action_copy(struct mlx5dr_ste_ctx *ste_ctx,
				__be64 *hw_action,
				u8 dst_hw_field,
				u8 dst_shifter,
				u8 dst_len,
				u8 src_hw_field,
				u8 src_shifter)
{
	ste_ctx->set_action_copy((u8 *)hw_action,
				 dst_hw_field, dst_shifter, dst_len,
				 src_hw_field, src_shifter);
}
 609
 610int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
 611                                        void *data, u32 data_sz,
 612                                        u8 *hw_action, u32 hw_action_sz,
 613                                        u16 *used_hw_action_num)
 614{
 615        /* Only Ethernet frame is supported, with VLAN (18) or without (14) */
 616        if (data_sz != HDR_LEN_L2 && data_sz != HDR_LEN_L2_W_VLAN)
 617                return -EINVAL;
 618
 619        return ste_ctx->set_action_decap_l3_list(data, data_sz,
 620                                                 hw_action, hw_action_sz,
 621                                                 used_hw_action_num);
 622}
 623
 624int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
 625                               u8 match_criteria,
 626                               struct mlx5dr_match_param *mask,
 627                               struct mlx5dr_match_param *value)
 628{
 629        if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) {
 630                if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
 631                        mlx5dr_err(dmn,
 632                                   "Partial mask source_port is not supported\n");
 633                        return -EINVAL;
 634                }
 635                if (mask->misc.source_eswitch_owner_vhca_id &&
 636                    mask->misc.source_eswitch_owner_vhca_id != 0xffff) {
 637                        mlx5dr_err(dmn,
 638                                   "Partial mask source_eswitch_owner_vhca_id is not supported\n");
 639                        return -EINVAL;
 640                }
 641        }
 642
 643        return 0;
 644}
 645
/* Build the chain of STEs for a rule into @ste_arr: one full-sized STE
 * per matcher builder, each linked to the next by lookup type and byte
 * mask. Returns 0 on success or a negative errno from pre-check/tag
 * building.
 */
int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
			     struct mlx5dr_matcher_rx_tx *nic_matcher,
			     struct mlx5dr_match_param *value,
			     u8 *ste_arr)
{
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_build *sb;
	int ret, i;

	ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
					 &matcher->mask, value);
	if (ret)
		return ret;

	sb = nic_matcher->ste_builder;
	for (i = 0; i < nic_matcher->num_of_builders; i++) {
		ste_ctx->ste_init(ste_arr,
				  sb->lu_type,
				  nic_dmn->ste_type,
				  dmn->info.caps.gvmi);

		mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);

		/* Builder-specific callback fills the tag from @value */
		ret = sb->ste_build_tag_func(value, sb, dr_ste_get_tag(ste_arr));
		if (ret)
			return ret;

		/* Connect the STEs */
		if (i < (nic_matcher->num_of_builders - 1)) {
			/* Need the next builder for these fields,
			 * not relevant for the last ste in the chain.
			 */
			sb++;
			ste_ctx->set_next_lu_type(ste_arr, sb->lu_type);
			ste_ctx->set_byte_mask(ste_arr, sb->byte_mask);
		}
		ste_arr += DR_STE_SIZE;
	}
	return 0;
}
 688
/* Extract every misc-parameters field from the FW-format @mask blob
 * into the SW steering match structure.
 */
static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec)
{
	spec->gre_c_present = MLX5_GET(fte_match_set_misc, mask, gre_c_present);
	spec->gre_k_present = MLX5_GET(fte_match_set_misc, mask, gre_k_present);
	spec->gre_s_present = MLX5_GET(fte_match_set_misc, mask, gre_s_present);
	spec->source_vhca_port = MLX5_GET(fte_match_set_misc, mask, source_vhca_port);
	spec->source_sqn = MLX5_GET(fte_match_set_misc, mask, source_sqn);

	spec->source_port = MLX5_GET(fte_match_set_misc, mask, source_port);
	spec->source_eswitch_owner_vhca_id = MLX5_GET(fte_match_set_misc, mask,
						      source_eswitch_owner_vhca_id);

	spec->outer_second_prio = MLX5_GET(fte_match_set_misc, mask, outer_second_prio);
	spec->outer_second_cfi = MLX5_GET(fte_match_set_misc, mask, outer_second_cfi);
	spec->outer_second_vid = MLX5_GET(fte_match_set_misc, mask, outer_second_vid);
	spec->inner_second_prio = MLX5_GET(fte_match_set_misc, mask, inner_second_prio);
	spec->inner_second_cfi = MLX5_GET(fte_match_set_misc, mask, inner_second_cfi);
	spec->inner_second_vid = MLX5_GET(fte_match_set_misc, mask, inner_second_vid);

	spec->outer_second_cvlan_tag =
		MLX5_GET(fte_match_set_misc, mask, outer_second_cvlan_tag);
	spec->inner_second_cvlan_tag =
		MLX5_GET(fte_match_set_misc, mask, inner_second_cvlan_tag);
	spec->outer_second_svlan_tag =
		MLX5_GET(fte_match_set_misc, mask, outer_second_svlan_tag);
	spec->inner_second_svlan_tag =
		MLX5_GET(fte_match_set_misc, mask, inner_second_svlan_tag);

	spec->gre_protocol = MLX5_GET(fte_match_set_misc, mask, gre_protocol);

	spec->gre_key_h = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.hi);
	spec->gre_key_l = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.lo);

	spec->vxlan_vni = MLX5_GET(fte_match_set_misc, mask, vxlan_vni);

	spec->geneve_vni = MLX5_GET(fte_match_set_misc, mask, geneve_vni);
	spec->geneve_oam = MLX5_GET(fte_match_set_misc, mask, geneve_oam);

	spec->outer_ipv6_flow_label =
		MLX5_GET(fte_match_set_misc, mask, outer_ipv6_flow_label);

	spec->inner_ipv6_flow_label =
		MLX5_GET(fte_match_set_misc, mask, inner_ipv6_flow_label);

	spec->geneve_opt_len = MLX5_GET(fte_match_set_misc, mask, geneve_opt_len);
	spec->geneve_protocol_type =
		MLX5_GET(fte_match_set_misc, mask, geneve_protocol_type);

	spec->bth_dst_qp = MLX5_GET(fte_match_set_misc, mask, bth_dst_qp);
}
 739
/* Extract every layer 2-4 header field from the FW-format @mask blob
 * into the SW steering match structure; IPv6 addresses are copied as
 * raw big-endian words and converted to CPU order.
 */
static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec)
{
	__be32 raw_ip[4];

	spec->smac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_47_16);

	spec->smac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_15_0);
	spec->ethertype = MLX5_GET(fte_match_set_lyr_2_4, mask, ethertype);

	spec->dmac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_47_16);

	spec->dmac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_15_0);
	spec->first_prio = MLX5_GET(fte_match_set_lyr_2_4, mask, first_prio);
	spec->first_cfi = MLX5_GET(fte_match_set_lyr_2_4, mask, first_cfi);
	spec->first_vid = MLX5_GET(fte_match_set_lyr_2_4, mask, first_vid);

	spec->ip_protocol = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_protocol);
	spec->ip_dscp = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_dscp);
	spec->ip_ecn = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_ecn);
	spec->cvlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, cvlan_tag);
	spec->svlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, svlan_tag);
	spec->frag = MLX5_GET(fte_match_set_lyr_2_4, mask, frag);
	spec->ip_version = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_version);
	spec->tcp_flags = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_flags);
	spec->tcp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_sport);
	spec->tcp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_dport);

	spec->ttl_hoplimit = MLX5_GET(fte_match_set_lyr_2_4, mask, ttl_hoplimit);

	spec->udp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_sport);
	spec->udp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_dport);

	/* Source address: 128 bits as four big-endian words, [0] is the
	 * most significant.
	 */
	memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
				    sizeof(raw_ip));

	spec->src_ip_127_96 = be32_to_cpu(raw_ip[0]);
	spec->src_ip_95_64 = be32_to_cpu(raw_ip[1]);
	spec->src_ip_63_32 = be32_to_cpu(raw_ip[2]);
	spec->src_ip_31_0 = be32_to_cpu(raw_ip[3]);

	/* Destination address, same layout as the source */
	memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
				    sizeof(raw_ip));

	spec->dst_ip_127_96 = be32_to_cpu(raw_ip[0]);
	spec->dst_ip_95_64 = be32_to_cpu(raw_ip[1]);
	spec->dst_ip_63_32 = be32_to_cpu(raw_ip[2]);
	spec->dst_ip_31_0 = be32_to_cpu(raw_ip[3]);
}
 790
/* Copy the misc2 match mask from the PRM fte_match_set_misc2 layout
 * (@mask) into the SW steering representation (@spec): MPLS headers
 * (outermost/innermost and MPLS-over-GRE/UDP) plus metadata registers.
 */
static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec)
{
        spec->outer_first_mpls_label =
                MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_label);
        spec->outer_first_mpls_exp =
                MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_exp);
        spec->outer_first_mpls_s_bos =
                MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_s_bos);
        spec->outer_first_mpls_ttl =
                MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_ttl);
        spec->inner_first_mpls_label =
                MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_label);
        spec->inner_first_mpls_exp =
                MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_exp);
        spec->inner_first_mpls_s_bos =
                MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_s_bos);
        spec->inner_first_mpls_ttl =
                MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_ttl);
        spec->outer_first_mpls_over_gre_label =
                MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_label);
        spec->outer_first_mpls_over_gre_exp =
                MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_exp);
        spec->outer_first_mpls_over_gre_s_bos =
                MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_s_bos);
        spec->outer_first_mpls_over_gre_ttl =
                MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_ttl);
        spec->outer_first_mpls_over_udp_label =
                MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_label);
        spec->outer_first_mpls_over_udp_exp =
                MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_exp);
        spec->outer_first_mpls_over_udp_s_bos =
                MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_s_bos);
        spec->outer_first_mpls_over_udp_ttl =
                MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_ttl);
        spec->metadata_reg_c_7 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_7);
        spec->metadata_reg_c_6 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_6);
        spec->metadata_reg_c_5 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_5);
        spec->metadata_reg_c_4 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_4);
        spec->metadata_reg_c_3 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_3);
        spec->metadata_reg_c_2 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_2);
        spec->metadata_reg_c_1 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_1);
        spec->metadata_reg_c_0 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_0);
        spec->metadata_reg_a = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_a);
}
 835
/* Copy the misc3 match mask from the PRM fte_match_set_misc3 layout
 * (@mask) into the SW steering representation (@spec): TCP seq/ack,
 * VXLAN-GPE, ICMPv4/v6, GENEVE TLV option data and GTP-U fields.
 * Note: the ICMPv4 spec fields are filled from the generic icmp_*
 * device fields, while ICMPv6 has dedicated icmpv6_* fields.
 */
static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec)
{
        spec->inner_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_seq_num);
        spec->outer_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_seq_num);
        spec->inner_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_ack_num);
        spec->outer_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_ack_num);
        spec->outer_vxlan_gpe_vni =
                MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_vni);
        spec->outer_vxlan_gpe_next_protocol =
                MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_next_protocol);
        spec->outer_vxlan_gpe_flags =
                MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_flags);
        spec->icmpv4_header_data = MLX5_GET(fte_match_set_misc3, mask, icmp_header_data);
        spec->icmpv6_header_data =
                MLX5_GET(fte_match_set_misc3, mask, icmpv6_header_data);
        spec->icmpv4_type = MLX5_GET(fte_match_set_misc3, mask, icmp_type);
        spec->icmpv4_code = MLX5_GET(fte_match_set_misc3, mask, icmp_code);
        spec->icmpv6_type = MLX5_GET(fte_match_set_misc3, mask, icmpv6_type);
        spec->icmpv6_code = MLX5_GET(fte_match_set_misc3, mask, icmpv6_code);
        spec->geneve_tlv_option_0_data =
                MLX5_GET(fte_match_set_misc3, mask, geneve_tlv_option_0_data);
        spec->gtpu_msg_flags = MLX5_GET(fte_match_set_misc3, mask, gtpu_msg_flags);
        spec->gtpu_msg_type = MLX5_GET(fte_match_set_misc3, mask, gtpu_msg_type);
        spec->gtpu_teid = MLX5_GET(fte_match_set_misc3, mask, gtpu_teid);
        spec->gtpu_dw_0 = MLX5_GET(fte_match_set_misc3, mask, gtpu_dw_0);
        spec->gtpu_dw_2 = MLX5_GET(fte_match_set_misc3, mask, gtpu_dw_2);
        spec->gtpu_first_ext_dw_0 =
                MLX5_GET(fte_match_set_misc3, mask, gtpu_first_ext_dw_0);
}
 865
/* Copy the misc4 match mask from the PRM fte_match_set_misc4 layout
 * (@mask) into the SW steering representation (@spec): the four
 * programmable (flex parser) sample field id/value pairs.
 */
static void dr_ste_copy_mask_misc4(char *mask, struct mlx5dr_match_misc4 *spec)
{
        spec->prog_sample_field_id_0 =
                MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_0);
        spec->prog_sample_field_value_0 =
                MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_0);
        spec->prog_sample_field_id_1 =
                MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_1);
        spec->prog_sample_field_value_1 =
                MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_1);
        spec->prog_sample_field_id_2 =
                MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_2);
        spec->prog_sample_field_value_2 =
                MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_2);
        spec->prog_sample_field_id_3 =
                MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_3);
        spec->prog_sample_field_value_3 =
                MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_3);
}
 885
 886void mlx5dr_ste_copy_param(u8 match_criteria,
 887                           struct mlx5dr_match_param *set_param,
 888                           struct mlx5dr_match_parameters *mask)
 889{
 890        u8 tail_param[MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)] = {};
 891        u8 *data = (u8 *)mask->match_buf;
 892        size_t param_location;
 893        void *buff;
 894
 895        if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
 896                if (mask->match_sz < sizeof(struct mlx5dr_match_spec)) {
 897                        memcpy(tail_param, data, mask->match_sz);
 898                        buff = tail_param;
 899                } else {
 900                        buff = mask->match_buf;
 901                }
 902                dr_ste_copy_mask_spec(buff, &set_param->outer);
 903        }
 904        param_location = sizeof(struct mlx5dr_match_spec);
 905
 906        if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
 907                if (mask->match_sz < param_location +
 908                    sizeof(struct mlx5dr_match_misc)) {
 909                        memcpy(tail_param, data + param_location,
 910                               mask->match_sz - param_location);
 911                        buff = tail_param;
 912                } else {
 913                        buff = data + param_location;
 914                }
 915                dr_ste_copy_mask_misc(buff, &set_param->misc);
 916        }
 917        param_location += sizeof(struct mlx5dr_match_misc);
 918
 919        if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
 920                if (mask->match_sz < param_location +
 921                    sizeof(struct mlx5dr_match_spec)) {
 922                        memcpy(tail_param, data + param_location,
 923                               mask->match_sz - param_location);
 924                        buff = tail_param;
 925                } else {
 926                        buff = data + param_location;
 927                }
 928                dr_ste_copy_mask_spec(buff, &set_param->inner);
 929        }
 930        param_location += sizeof(struct mlx5dr_match_spec);
 931
 932        if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
 933                if (mask->match_sz < param_location +
 934                    sizeof(struct mlx5dr_match_misc2)) {
 935                        memcpy(tail_param, data + param_location,
 936                               mask->match_sz - param_location);
 937                        buff = tail_param;
 938                } else {
 939                        buff = data + param_location;
 940                }
 941                dr_ste_copy_mask_misc2(buff, &set_param->misc2);
 942        }
 943
 944        param_location += sizeof(struct mlx5dr_match_misc2);
 945
 946        if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
 947                if (mask->match_sz < param_location +
 948                    sizeof(struct mlx5dr_match_misc3)) {
 949                        memcpy(tail_param, data + param_location,
 950                               mask->match_sz - param_location);
 951                        buff = tail_param;
 952                } else {
 953                        buff = data + param_location;
 954                }
 955                dr_ste_copy_mask_misc3(buff, &set_param->misc3);
 956        }
 957
 958        param_location += sizeof(struct mlx5dr_match_misc3);
 959
 960        if (match_criteria & DR_MATCHER_CRITERIA_MISC4) {
 961                if (mask->match_sz < param_location +
 962                    sizeof(struct mlx5dr_match_misc4)) {
 963                        memcpy(tail_param, data + param_location,
 964                               mask->match_sz - param_location);
 965                        buff = tail_param;
 966                } else {
 967                        buff = data + param_location;
 968                }
 969                dr_ste_copy_mask_misc4(buff, &set_param->misc4);
 970        }
 971}
 972
 973void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
 974                                     struct mlx5dr_ste_build *sb,
 975                                     struct mlx5dr_match_param *mask,
 976                                     bool inner, bool rx)
 977{
 978        sb->rx = rx;
 979        sb->inner = inner;
 980        ste_ctx->build_eth_l2_src_dst_init(sb, mask);
 981}
 982
 983void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_ctx *ste_ctx,
 984                                      struct mlx5dr_ste_build *sb,
 985                                      struct mlx5dr_match_param *mask,
 986                                      bool inner, bool rx)
 987{
 988        sb->rx = rx;
 989        sb->inner = inner;
 990        ste_ctx->build_eth_l3_ipv6_dst_init(sb, mask);
 991}
 992
 993void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_ctx *ste_ctx,
 994                                      struct mlx5dr_ste_build *sb,
 995                                      struct mlx5dr_match_param *mask,
 996                                      bool inner, bool rx)
 997{
 998        sb->rx = rx;
 999        sb->inner = inner;
1000        ste_ctx->build_eth_l3_ipv6_src_init(sb, mask);
1001}
1002
1003void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_ctx *ste_ctx,
1004                                          struct mlx5dr_ste_build *sb,
1005                                          struct mlx5dr_match_param *mask,
1006                                          bool inner, bool rx)
1007{
1008        sb->rx = rx;
1009        sb->inner = inner;
1010        ste_ctx->build_eth_l3_ipv4_5_tuple_init(sb, mask);
1011}
1012
1013void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_ctx *ste_ctx,
1014                                 struct mlx5dr_ste_build *sb,
1015                                 struct mlx5dr_match_param *mask,
1016                                 bool inner, bool rx)
1017{
1018        sb->rx = rx;
1019        sb->inner = inner;
1020        ste_ctx->build_eth_l2_src_init(sb, mask);
1021}
1022
1023void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_ctx *ste_ctx,
1024                                 struct mlx5dr_ste_build *sb,
1025                                 struct mlx5dr_match_param *mask,
1026                                 bool inner, bool rx)
1027{
1028        sb->rx = rx;
1029        sb->inner = inner;
1030        ste_ctx->build_eth_l2_dst_init(sb, mask);
1031}
1032
1033void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_ctx *ste_ctx,
1034                                 struct mlx5dr_ste_build *sb,
1035                                 struct mlx5dr_match_param *mask, bool inner, bool rx)
1036{
1037        sb->rx = rx;
1038        sb->inner = inner;
1039        ste_ctx->build_eth_l2_tnl_init(sb, mask);
1040}
1041
1042void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_ctx *ste_ctx,
1043                                       struct mlx5dr_ste_build *sb,
1044                                       struct mlx5dr_match_param *mask,
1045                                       bool inner, bool rx)
1046{
1047        sb->rx = rx;
1048        sb->inner = inner;
1049        ste_ctx->build_eth_l3_ipv4_misc_init(sb, mask);
1050}
1051
1052void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_ctx *ste_ctx,
1053                                     struct mlx5dr_ste_build *sb,
1054                                     struct mlx5dr_match_param *mask,
1055                                     bool inner, bool rx)
1056{
1057        sb->rx = rx;
1058        sb->inner = inner;
1059        ste_ctx->build_eth_ipv6_l3_l4_init(sb, mask);
1060}
1061
/* Tag-build callback for the "always hit" STE: nothing is matched on,
 * so the tag is left untouched and building always succeeds.
 */
static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value,
                                             struct mlx5dr_ste_build *sb,
                                             u8 *tag)
{
        return 0;
}
1068
1069void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx)
1070{
1071        sb->rx = rx;
1072        sb->lu_type = MLX5DR_STE_LU_TYPE_DONT_CARE;
1073        sb->byte_mask = 0;
1074        sb->ste_build_tag_func = &dr_ste_build_empty_always_hit_tag;
1075}
1076
1077void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
1078                           struct mlx5dr_ste_build *sb,
1079                           struct mlx5dr_match_param *mask,
1080                           bool inner, bool rx)
1081{
1082        sb->rx = rx;
1083        sb->inner = inner;
1084        ste_ctx->build_mpls_init(sb, mask);
1085}
1086
1087void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
1088                              struct mlx5dr_ste_build *sb,
1089                              struct mlx5dr_match_param *mask,
1090                              bool inner, bool rx)
1091{
1092        sb->rx = rx;
1093        sb->inner = inner;
1094        ste_ctx->build_tnl_gre_init(sb, mask);
1095}
1096
1097void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx,
1098                                        struct mlx5dr_ste_build *sb,
1099                                        struct mlx5dr_match_param *mask,
1100                                        struct mlx5dr_cmd_caps *caps,
1101                                        bool inner, bool rx)
1102{
1103        sb->rx = rx;
1104        sb->inner = inner;
1105        sb->caps = caps;
1106        return ste_ctx->build_tnl_mpls_over_gre_init(sb, mask);
1107}
1108
1109void mlx5dr_ste_build_tnl_mpls_over_udp(struct mlx5dr_ste_ctx *ste_ctx,
1110                                        struct mlx5dr_ste_build *sb,
1111                                        struct mlx5dr_match_param *mask,
1112                                        struct mlx5dr_cmd_caps *caps,
1113                                        bool inner, bool rx)
1114{
1115        sb->rx = rx;
1116        sb->inner = inner;
1117        sb->caps = caps;
1118        return ste_ctx->build_tnl_mpls_over_udp_init(sb, mask);
1119}
1120
1121void mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
1122                           struct mlx5dr_ste_build *sb,
1123                           struct mlx5dr_match_param *mask,
1124                           struct mlx5dr_cmd_caps *caps,
1125                           bool inner, bool rx)
1126{
1127        sb->rx = rx;
1128        sb->inner = inner;
1129        sb->caps = caps;
1130        ste_ctx->build_icmp_init(sb, mask);
1131}
1132
1133void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
1134                                      struct mlx5dr_ste_build *sb,
1135                                      struct mlx5dr_match_param *mask,
1136                                      bool inner, bool rx)
1137{
1138        sb->rx = rx;
1139        sb->inner = inner;
1140        ste_ctx->build_general_purpose_init(sb, mask);
1141}
1142
1143void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_ctx *ste_ctx,
1144                                  struct mlx5dr_ste_build *sb,
1145                                  struct mlx5dr_match_param *mask,
1146                                  bool inner, bool rx)
1147{
1148        sb->rx = rx;
1149        sb->inner = inner;
1150        ste_ctx->build_eth_l4_misc_init(sb, mask);
1151}
1152
1153void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx,
1154                                    struct mlx5dr_ste_build *sb,
1155                                    struct mlx5dr_match_param *mask,
1156                                    bool inner, bool rx)
1157{
1158        sb->rx = rx;
1159        sb->inner = inner;
1160        ste_ctx->build_tnl_vxlan_gpe_init(sb, mask);
1161}
1162
1163void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
1164                                 struct mlx5dr_ste_build *sb,
1165                                 struct mlx5dr_match_param *mask,
1166                                 bool inner, bool rx)
1167{
1168        sb->rx = rx;
1169        sb->inner = inner;
1170        ste_ctx->build_tnl_geneve_init(sb, mask);
1171}
1172
1173void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx,
1174                                         struct mlx5dr_ste_build *sb,
1175                                         struct mlx5dr_match_param *mask,
1176                                         struct mlx5dr_cmd_caps *caps,
1177                                         bool inner, bool rx)
1178{
1179        sb->rx = rx;
1180        sb->caps = caps;
1181        sb->inner = inner;
1182        ste_ctx->build_tnl_geneve_tlv_opt_init(sb, mask);
1183}
1184
1185void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx,
1186                               struct mlx5dr_ste_build *sb,
1187                               struct mlx5dr_match_param *mask,
1188                               bool inner, bool rx)
1189{
1190        sb->rx = rx;
1191        sb->inner = inner;
1192        ste_ctx->build_tnl_gtpu_init(sb, mask);
1193}
1194
1195void mlx5dr_ste_build_tnl_gtpu_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
1196                                             struct mlx5dr_ste_build *sb,
1197                                             struct mlx5dr_match_param *mask,
1198                                             struct mlx5dr_cmd_caps *caps,
1199                                             bool inner, bool rx)
1200{
1201        sb->rx = rx;
1202        sb->caps = caps;
1203        sb->inner = inner;
1204        ste_ctx->build_tnl_gtpu_flex_parser_0_init(sb, mask);
1205}
1206
1207void mlx5dr_ste_build_tnl_gtpu_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
1208                                             struct mlx5dr_ste_build *sb,
1209                                             struct mlx5dr_match_param *mask,
1210                                             struct mlx5dr_cmd_caps *caps,
1211                                             bool inner, bool rx)
1212{
1213        sb->rx = rx;
1214        sb->caps = caps;
1215        sb->inner = inner;
1216        ste_ctx->build_tnl_gtpu_flex_parser_1_init(sb, mask);
1217}
1218
1219void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
1220                                 struct mlx5dr_ste_build *sb,
1221                                 struct mlx5dr_match_param *mask,
1222                                 bool inner, bool rx)
1223{
1224        sb->rx = rx;
1225        sb->inner = inner;
1226        ste_ctx->build_register_0_init(sb, mask);
1227}
1228
1229void mlx5dr_ste_build_register_1(struct mlx5dr_ste_ctx *ste_ctx,
1230                                 struct mlx5dr_ste_build *sb,
1231                                 struct mlx5dr_match_param *mask,
1232                                 bool inner, bool rx)
1233{
1234        sb->rx = rx;
1235        sb->inner = inner;
1236        ste_ctx->build_register_1_init(sb, mask);
1237}
1238
1239void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
1240                                   struct mlx5dr_ste_build *sb,
1241                                   struct mlx5dr_match_param *mask,
1242                                   struct mlx5dr_domain *dmn,
1243                                   bool inner, bool rx)
1244{
1245        /* Set vhca_id_valid before we reset source_eswitch_owner_vhca_id */
1246        sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id;
1247
1248        sb->rx = rx;
1249        sb->dmn = dmn;
1250        sb->inner = inner;
1251        ste_ctx->build_src_gvmi_qpn_init(sb, mask);
1252}
1253
1254void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
1255                                    struct mlx5dr_ste_build *sb,
1256                                    struct mlx5dr_match_param *mask,
1257                                    bool inner, bool rx)
1258{
1259        sb->rx = rx;
1260        sb->inner = inner;
1261        ste_ctx->build_flex_parser_0_init(sb, mask);
1262}
1263
1264void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
1265                                    struct mlx5dr_ste_build *sb,
1266                                    struct mlx5dr_match_param *mask,
1267                                    bool inner, bool rx)
1268{
1269        sb->rx = rx;
1270        sb->inner = inner;
1271        ste_ctx->build_flex_parser_1_init(sb, mask);
1272}
1273
/* STE context per device steering-format version, indexed by the
 * MLX5_STEERING_FORMAT_* enum: v0 for ConnectX-5 class devices,
 * v1 for ConnectX-6 Dx.
 */
static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = {
        [MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0,
        [MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1,
};
1278
1279struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
1280{
1281        if (version > MLX5_STEERING_FORMAT_CONNECTX_6DX)
1282                return NULL;
1283
1284        return mlx5dr_ste_ctx_arr[version];
1285}
1286