1
2
3
4#include <linux/types.h>
5#include <linux/crc32.h>
6#include "dr_ste.h"
7
/* On-the-wire layout of a single STE (Steering Table Entry):
 * control section, then the match tag, then the bit mask.
 */
struct dr_hw_ste_format {
	u8 ctrl[DR_STE_SIZE_CTRL];
	u8 tag[DR_STE_SIZE_TAG];
	u8 mask[DR_STE_SIZE_MASK];
};
13
14static u32 dr_ste_crc32_calc(const void *input_data, size_t length)
15{
16 u32 crc = crc32(0, input_data, length);
17
18 return (__force u32)htonl(crc);
19}
20
/* True when the device's steering format is newer than ConnectX-5,
 * i.e. TTL checksum recalculation is supported.
 */
bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps)
{
	return caps->sw_format_ver > MLX5_STEERING_FORMAT_CONNECTX_5;
}
25
26u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
27{
28 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
29 u8 masked[DR_STE_SIZE_TAG] = {};
30 u32 crc32, index;
31 u16 bit;
32 int i;
33
34
35 if (htbl->chunk->num_of_entries == 1 || htbl->byte_mask == 0)
36 return 0;
37
38
39 bit = 1 << (DR_STE_SIZE_TAG - 1);
40 for (i = 0; i < DR_STE_SIZE_TAG; i++) {
41 if (htbl->byte_mask & bit)
42 masked[i] = hw_ste->tag[i];
43
44 bit = bit >> 1;
45 }
46
47 crc32 = dr_ste_crc32_calc(masked, DR_STE_SIZE_TAG);
48 index = crc32 & (htbl->chunk->num_of_entries - 1);
49
50 return index;
51}
52
53u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
54{
55 u16 byte_mask = 0;
56 int i;
57
58 for (i = 0; i < DR_STE_SIZE_MASK; i++) {
59 byte_mask = byte_mask << 1;
60 if (bit_mask[i] == 0xff)
61 byte_mask |= 1;
62 }
63 return byte_mask;
64}
65
66static u8 *dr_ste_get_tag(u8 *hw_ste_p)
67{
68 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
69
70 return hw_ste->tag;
71}
72
73void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask)
74{
75 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
76
77 memcpy(hw_ste->mask, bit_mask, DR_STE_SIZE_MASK);
78}
79
80static void dr_ste_set_always_hit(struct dr_hw_ste_format *hw_ste)
81{
82 memset(&hw_ste->tag, 0, sizeof(hw_ste->tag));
83 memset(&hw_ste->mask, 0, sizeof(hw_ste->mask));
84}
85
86static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste)
87{
88 hw_ste->tag[0] = 0xdc;
89 hw_ste->mask[0] = 0;
90}
91
/* Format-agnostic wrapper: set the miss address on a raw STE via the
 * device-specific STE context ops.
 */
void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
			      u8 *hw_ste_p, u64 miss_addr)
{
	ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
}
97
98static void dr_ste_always_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
99 struct mlx5dr_ste *ste, u64 miss_addr)
100{
101 u8 *hw_ste_p = ste->hw_ste;
102
103 ste_ctx->set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
104 ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
105 dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
106}
107
/* Format-agnostic wrapper: set the hit (next table) address and size on a
 * raw STE via the device-specific STE context ops.
 */
void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
			     u8 *hw_ste, u64 icm_addr, u32 ht_size)
{
	ste_ctx->set_hit_addr(hw_ste, icm_addr, ht_size);
}
113
114u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
115{
116 u32 index = ste - ste->htbl->ste_arr;
117
118 return ste->htbl->chunk->icm_addr + DR_STE_SIZE * index;
119}
120
121u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste)
122{
123 u32 index = ste - ste->htbl->ste_arr;
124
125 return ste->htbl->chunk->mr_addr + DR_STE_SIZE * index;
126}
127
128struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste)
129{
130 u32 index = ste - ste->htbl->ste_arr;
131
132 return &ste->htbl->miss_list[index];
133}
134
135static void dr_ste_always_hit_htbl(struct mlx5dr_ste_ctx *ste_ctx,
136 struct mlx5dr_ste *ste,
137 struct mlx5dr_ste_htbl *next_htbl)
138{
139 struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
140 u8 *hw_ste = ste->hw_ste;
141
142 ste_ctx->set_byte_mask(hw_ste, next_htbl->byte_mask);
143 ste_ctx->set_next_lu_type(hw_ste, next_htbl->lu_type);
144 ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
145
146 dr_ste_set_always_hit((struct dr_hw_ste_format *)ste->hw_ste);
147}
148
/* True when @ste_location is the final builder position of the rule chain. */
bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
				u8 ste_location)
{
	return ste_location == nic_matcher->num_of_builders;
}
154
155
156
157
158
159
160
161
162
163
164
165
166
167static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
168{
169 memcpy(dst->hw_ste, src->hw_ste, DR_STE_SIZE_REDUCED);
170 dst->next_htbl = src->next_htbl;
171 if (dst->next_htbl)
172 dst->next_htbl->pointing_ste = dst;
173
174 dst->refcount = src->refcount;
175}
176
177
/* Free the head STE of a miss list when it has no successors: rewrite it
 * in place as an always-miss entry that jumps to the matcher's end anchor,
 * and queue the full-size write to HW.
 *
 * NOTE(review): statement order here mirrors the HW-update protocol
 * (scratch image first, then copy-back, then send) — do not reorder.
 */
static void
dr_ste_remove_head_ste(struct mlx5dr_ste_ctx *ste_ctx,
		       struct mlx5dr_ste *ste,
		       struct mlx5dr_matcher_rx_tx *nic_matcher,
		       struct mlx5dr_ste_send_info *ste_info_head,
		       struct list_head *send_ste_list,
		       struct mlx5dr_ste_htbl *stats_tbl)
{
	u8 tmp_data_ste[DR_STE_SIZE] = {};
	struct mlx5dr_ste tmp_ste = {};
	u64 miss_addr;

	tmp_ste.hw_ste = tmp_data_ste;

	/* Build the new always-miss image in a scratch buffer, then copy
	 * it back over the live (reduced) STE.
	 */
	memcpy(tmp_ste.hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
	miss_addr = nic_matcher->e_anchor->chunk->icm_addr;
	dr_ste_always_miss_addr(ste_ctx, &tmp_ste, miss_addr);
	memcpy(ste->hw_ste, tmp_ste.hw_ste, DR_STE_SIZE_REDUCED);

	list_del_init(&ste->miss_list_node);

	/* Queue the full STE image for writing to HW */
	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
						  0, tmp_data_ste,
						  ste_info_head,
						  send_ste_list,
						  true /* Copy data */);

	stats_tbl->ctrl.num_of_valid_entries--;
}
211
212
213
214
/* Free the head STE of a miss list by promoting its successor: @next_ste's
 * content is copied over @ste, @next_ste is unlinked, and the updated head
 * (with the matcher's bit mask re-applied) is queued for writing to HW.
 */
static void
dr_ste_replace_head_ste(struct mlx5dr_matcher_rx_tx *nic_matcher,
			struct mlx5dr_ste *ste,
			struct mlx5dr_ste *next_ste,
			struct mlx5dr_ste_send_info *ste_info_head,
			struct list_head *send_ste_list,
			struct mlx5dr_ste_htbl *stats_tbl)

{
	struct mlx5dr_ste_htbl *next_miss_htbl;
	u8 hw_ste[DR_STE_SIZE] = {};
	int sb_idx;

	next_miss_htbl = next_ste->htbl;

	/* Remove the promoted entry from the miss list */
	list_del_init(&next_ste->miss_list_node);

	/* Move next_ste's content and linkage into the head slot */
	dr_ste_replace(ste, next_ste);

	/* The rule now points at the head slot instead of next_ste */
	mlx5dr_rule_set_last_member(next_ste->rule_rx_tx, ste, false);

	/* Re-apply this chain location's bit mask on a full-size copy */
	memcpy(hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
	sb_idx = ste->ste_chain_location - 1;
	mlx5dr_ste_set_bit_mask(hw_ste,
				nic_matcher->ste_builder[sb_idx].bit_mask);

	/* Drop the reference the promoted entry held on its table */
	mlx5dr_htbl_put(next_miss_htbl);

	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
						  0, hw_ste,
						  ste_info_head,
						  send_ste_list,
						  true /* Copy data */);

	stats_tbl->ctrl.num_of_collisions--;
	stats_tbl->ctrl.num_of_valid_entries--;
}
259
260
261
262
/* Free a non-head STE from a miss list: splice it out by redirecting the
 * previous entry's miss address to this entry's miss address, then queue
 * the previous entry's control section for writing to HW.
 */
static void dr_ste_remove_middle_ste(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste *ste,
				     struct mlx5dr_ste_send_info *ste_info,
				     struct list_head *send_ste_list,
				     struct mlx5dr_ste_htbl *stats_tbl)
{
	struct mlx5dr_ste *prev_ste;
	u64 miss_addr;

	/* Caller guarantees this is not the list head, so a predecessor
	 * must exist; WARN and bail if the invariant is broken.
	 */
	prev_ste = list_prev_entry(ste, miss_list_node);
	if (WARN_ON(!prev_ste))
		return;

	miss_addr = ste_ctx->get_miss_addr(ste->hw_ste);
	ste_ctx->set_miss_addr(prev_ste->hw_ste, miss_addr);

	/* Only the control section changed on prev_ste */
	mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_CTRL, 0,
						  prev_ste->hw_ste, ste_info,
						  send_ste_list, true /* Copy data */);

	list_del_init(&ste->miss_list_node);

	stats_tbl->ctrl.num_of_valid_entries--;
	stats_tbl->ctrl.num_of_collisions--;
}
288
/* Free @ste from its hash table, repairing the miss (collision) list it
 * belongs to and pushing the required updates to HW.
 *
 * Three cases:
 *  - head of list, no successor: rewrite in place as always-miss;
 *  - head of list, has successor: promote the successor into the head slot
 *    (the head slot stays in use, so its table ref is kept);
 *  - middle/tail: splice out by patching the predecessor's miss address.
 */
void mlx5dr_ste_free(struct mlx5dr_ste *ste,
		     struct mlx5dr_matcher *matcher,
		     struct mlx5dr_matcher_rx_tx *nic_matcher)
{
	struct mlx5dr_ste_send_info *cur_ste_info, *tmp_ste_info;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_send_info ste_info_head;
	struct mlx5dr_ste *next_ste, *first_ste;
	bool put_on_origin_table = true;
	struct mlx5dr_ste_htbl *stats_tbl;
	LIST_HEAD(send_ste_list);

	/* Stats are accounted on the table of the list head */
	first_ste = list_first_entry(mlx5dr_ste_get_miss_list(ste),
				     struct mlx5dr_ste, miss_list_node);
	stats_tbl = first_ste->htbl;

	if (first_ste == ste) { /* Removing the head of the miss list */
		struct mlx5dr_ste *last_ste;

		last_ste = list_last_entry(mlx5dr_ste_get_miss_list(ste),
					   struct mlx5dr_ste, miss_list_node);
		if (last_ste == first_ste)
			next_ste = NULL; /* single-entry list */
		else
			next_ste = list_next_entry(ste, miss_list_node);

		if (!next_ste) {
			/* One and only entry in the list */
			dr_ste_remove_head_ste(ste_ctx, ste,
					       nic_matcher,
					       &ste_info_head,
					       &send_ste_list,
					       stats_tbl);
		} else {
			/* Promote the successor into the head slot; the slot
			 * remains occupied, so keep the table reference.
			 */
			dr_ste_replace_head_ste(nic_matcher, ste,
						next_ste, &ste_info_head,
						&send_ste_list, stats_tbl);
			put_on_origin_table = false;
		}
	} else { /* Ste in the middle of the list */
		dr_ste_remove_middle_ste(ste_ctx, ste,
					 &ste_info_head, &send_ste_list,
					 stats_tbl);
	}

	/* Flush all queued STE writes to HW */
	list_for_each_entry_safe(cur_ste_info, tmp_ste_info,
				 &send_ste_list, send_list) {
		list_del(&cur_ste_info->send_list);
		mlx5dr_send_postsend_ste(dmn, cur_ste_info->ste,
					 cur_ste_info->data, cur_ste_info->size,
					 cur_ste_info->offset);
	}

	if (put_on_origin_table)
		mlx5dr_htbl_put(ste->htbl);
}
354
355bool mlx5dr_ste_equal_tag(void *src, void *dst)
356{
357 struct dr_hw_ste_format *s_hw_ste = (struct dr_hw_ste_format *)src;
358 struct dr_hw_ste_format *d_hw_ste = (struct dr_hw_ste_format *)dst;
359
360 return !memcmp(s_hw_ste->tag, d_hw_ste->tag, DR_STE_SIZE_TAG);
361}
362
363void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
364 u8 *hw_ste,
365 struct mlx5dr_ste_htbl *next_htbl)
366{
367 struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
368
369 ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
370}
371
/* Give the device-specific STE format a chance to fix up the raw STE
 * before it is sent to HW; optional hook, a no-op when not provided.
 */
void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
				     u8 *hw_ste_p, u32 ste_size)
{
	if (ste_ctx->prepare_for_postsend)
		ste_ctx->prepare_for_postsend(hw_ste_p, ste_size);
}
378
379
380void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
381 u16 gvmi,
382 enum mlx5dr_domain_nic_type nic_type,
383 struct mlx5dr_ste_htbl *htbl,
384 u8 *formatted_ste,
385 struct mlx5dr_htbl_connect_info *connect_info)
386{
387 bool is_rx = nic_type == DR_DOMAIN_NIC_TYPE_RX;
388 struct mlx5dr_ste ste = {};
389
390 ste_ctx->ste_init(formatted_ste, htbl->lu_type, is_rx, gvmi);
391 ste.hw_ste = formatted_ste;
392
393 if (connect_info->type == CONNECT_HIT)
394 dr_ste_always_hit_htbl(ste_ctx, &ste, connect_info->hit_next_htbl);
395 else
396 dr_ste_always_miss_addr(ste_ctx, &ste, connect_info->miss_icm_addr);
397}
398
399int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
400 struct mlx5dr_domain_rx_tx *nic_dmn,
401 struct mlx5dr_ste_htbl *htbl,
402 struct mlx5dr_htbl_connect_info *connect_info,
403 bool update_hw_ste)
404{
405 u8 formatted_ste[DR_STE_SIZE] = {};
406
407 mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
408 dmn->info.caps.gvmi,
409 nic_dmn->type,
410 htbl,
411 formatted_ste,
412 connect_info);
413
414 return mlx5dr_send_postsend_formatted_htbl(dmn, htbl, formatted_ste, update_hw_ste);
415}
416
/* Allocate and connect the next hash table in the rule chain for @ste,
 * unless @ste is the last builder position (then there is nothing to do).
 *
 * The new table's miss path is wired to the matcher's end anchor before
 * @cur_hw_ste's hit path is pointed at it.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -ENOENT if the
 * table could not be written to HW.
 */
int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
				struct mlx5dr_matcher_rx_tx *nic_matcher,
				struct mlx5dr_ste *ste,
				u8 *cur_hw_ste,
				enum mlx5dr_icm_chunk_size log_table_size)
{
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_htbl_connect_info info;
	struct mlx5dr_ste_htbl *next_htbl;

	if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
		u16 next_lu_type;
		u16 byte_mask;

		/* The next table inherits lookup type and byte mask from
		 * the current STE's control section.
		 */
		next_lu_type = ste_ctx->get_next_lu_type(cur_hw_ste);
		byte_mask = ste_ctx->get_byte_mask(cur_hw_ste);

		next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
						  log_table_size,
						  next_lu_type,
						  byte_mask);
		if (!next_htbl) {
			mlx5dr_dbg(dmn, "Failed allocating table\n");
			return -ENOMEM;
		}

		/* Write new table to HW, with its miss path going to the
		 * matcher's end anchor.
		 */
		info.type = CONNECT_MISS;
		info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
		if (mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, next_htbl,
						      &info, false)) {
			mlx5dr_info(dmn, "Failed writing table to HW\n");
			goto free_table;
		}

		mlx5dr_ste_set_hit_addr_by_next_htbl(ste_ctx,
						     cur_hw_ste, next_htbl);
		ste->next_htbl = next_htbl;
		next_htbl->pointing_ste = ste;
	}

	return 0;

free_table:
	mlx5dr_ste_htbl_free(next_htbl);
	return -ENOENT;
}
466
467struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
468 enum mlx5dr_icm_chunk_size chunk_size,
469 u16 lu_type, u16 byte_mask)
470{
471 struct mlx5dr_icm_chunk *chunk;
472 struct mlx5dr_ste_htbl *htbl;
473 int i;
474
475 htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
476 if (!htbl)
477 return NULL;
478
479 chunk = mlx5dr_icm_alloc_chunk(pool, chunk_size);
480 if (!chunk)
481 goto out_free_htbl;
482
483 htbl->chunk = chunk;
484 htbl->lu_type = lu_type;
485 htbl->byte_mask = byte_mask;
486 htbl->ste_arr = chunk->ste_arr;
487 htbl->hw_ste_arr = chunk->hw_ste_arr;
488 htbl->miss_list = chunk->miss_list;
489 htbl->refcount = 0;
490
491 for (i = 0; i < chunk->num_of_entries; i++) {
492 struct mlx5dr_ste *ste = &htbl->ste_arr[i];
493
494 ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
495 ste->htbl = htbl;
496 ste->refcount = 0;
497 INIT_LIST_HEAD(&ste->miss_list_node);
498 INIT_LIST_HEAD(&htbl->miss_list[i]);
499 }
500
501 htbl->chunk_size = chunk_size;
502 return htbl;
503
504out_free_htbl:
505 kfree(htbl);
506 return NULL;
507}
508
/* Free an STE hash table and its ICM chunk.
 * Returns -EBUSY (and frees nothing) while the table is still referenced.
 */
int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
{
	if (htbl->refcount)
		return -EBUSY;

	mlx5dr_icm_free_chunk(htbl->chunk);
	kfree(htbl);
	return 0;
}
518
/* Format-agnostic wrapper: encode the TX action set into the STE array
 * via the device-specific STE context ops.
 */
void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_domain *dmn,
			       u8 *action_type_set,
			       u8 *hw_ste_arr,
			       struct mlx5dr_ste_actions_attr *attr,
			       u32 *added_stes)
{
	ste_ctx->set_actions_tx(dmn, action_type_set, hw_ste_arr,
				attr, added_stes);
}
529
/* Format-agnostic wrapper: encode the RX action set into the STE array
 * via the device-specific STE context ops.
 */
void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_domain *dmn,
			       u8 *action_type_set,
			       u8 *hw_ste_arr,
			       struct mlx5dr_ste_actions_attr *attr,
			       u32 *added_stes)
{
	ste_ctx->set_actions_rx(dmn, action_type_set, hw_ste_arr,
				attr, added_stes);
}
540
541const struct mlx5dr_ste_action_modify_field *
542mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field)
543{
544 const struct mlx5dr_ste_action_modify_field *hw_field;
545
546 if (sw_field >= ste_ctx->modify_field_arr_sz)
547 return NULL;
548
549 hw_field = &ste_ctx->modify_field_arr[sw_field];
550 if (!hw_field->end && !hw_field->start)
551 return NULL;
552
553 return hw_field;
554}
555
/* Format-agnostic wrapper: encode a modify-header "set" action. */
void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx,
			       __be64 *hw_action,
			       u8 hw_field,
			       u8 shifter,
			       u8 length,
			       u32 data)
{
	ste_ctx->set_action_set((u8 *)hw_action,
				hw_field, shifter, length, data);
}
566
/* Format-agnostic wrapper: encode a modify-header "add" action. */
void mlx5dr_ste_set_action_add(struct mlx5dr_ste_ctx *ste_ctx,
			       __be64 *hw_action,
			       u8 hw_field,
			       u8 shifter,
			       u8 length,
			       u32 data)
{
	ste_ctx->set_action_add((u8 *)hw_action,
				hw_field, shifter, length, data);
}
577
/* Format-agnostic wrapper: encode a modify-header "copy" action. */
void mlx5dr_ste_set_action_copy(struct mlx5dr_ste_ctx *ste_ctx,
				__be64 *hw_action,
				u8 dst_hw_field,
				u8 dst_shifter,
				u8 dst_len,
				u8 src_hw_field,
				u8 src_shifter)
{
	ste_ctx->set_action_copy((u8 *)hw_action,
				 dst_hw_field, dst_shifter, dst_len,
				 src_hw_field, src_shifter);
}
590
/* Build the HW action list for an L3 decap with inner L2 rewrite.
 * Only a plain L2 header or an L2 header with a single VLAN is accepted;
 * anything else is -EINVAL. Delegates encoding to the STE format ops.
 */
int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
					void *data, u32 data_sz,
					u8 *hw_action, u32 hw_action_sz,
					u16 *used_hw_action_num)
{
	/* Only L2 header with one VLAN is supported for decap */
	if (data_sz != HDR_LEN_L2 && data_sz != HDR_LEN_L2_W_VLAN)
		return -EINVAL;

	return ste_ctx->set_action_decap_l3_list(data, data_sz,
						 hw_action, hw_action_sz,
						 used_hw_action_num);
}
604
605int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
606 u8 match_criteria,
607 struct mlx5dr_match_param *mask,
608 struct mlx5dr_match_param *value)
609{
610 if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) {
611 if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
612 mlx5dr_err(dmn,
613 "Partial mask source_port is not supported\n");
614 return -EINVAL;
615 }
616 if (mask->misc.source_eswitch_owner_vhca_id &&
617 mask->misc.source_eswitch_owner_vhca_id != 0xffff) {
618 mlx5dr_err(dmn,
619 "Partial mask source_eswitch_owner_vhca_id is not supported\n");
620 return -EINVAL;
621 }
622 }
623
624 return 0;
625}
626
/* Build the full STE array for a rule from @value, one STE per matcher
 * builder: init the STE, apply the builder's bit mask, fill the tag, and
 * chain each STE's control section to the next builder's lookup type and
 * byte mask.
 * Returns 0 on success or the first tag-builder error.
 */
int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
			     struct mlx5dr_matcher_rx_tx *nic_matcher,
			     struct mlx5dr_match_param *value,
			     u8 *ste_arr)
{
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	bool is_rx = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_build *sb;
	int ret, i;

	ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
					 &matcher->mask, value);
	if (ret)
		return ret;

	sb = nic_matcher->ste_builder;
	for (i = 0; i < nic_matcher->num_of_builders; i++) {
		ste_ctx->ste_init(ste_arr,
				  sb->lu_type,
				  is_rx,
				  dmn->info.caps.gvmi);

		mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);

		ret = sb->ste_build_tag_func(value, sb, dr_ste_get_tag(ste_arr));
		if (ret)
			return ret;

		/* Connect the STEs */
		if (i < (nic_matcher->num_of_builders - 1)) {
			/* Need the next builder for these fields,
			 * so do the advance here.
			 */
			sb++;
			ste_ctx->set_next_lu_type(ste_arr, sb->lu_type);
			ste_ctx->set_byte_mask(ste_arr, sb->byte_mask);
		}
		ste_arr += DR_STE_SIZE;
	}
	return 0;
}
670
/* Unpack the fte_match_set_misc mask buffer into @spec, field by field. */
static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec)
{
	spec->gre_c_present = MLX5_GET(fte_match_set_misc, mask, gre_c_present);
	spec->gre_k_present = MLX5_GET(fte_match_set_misc, mask, gre_k_present);
	spec->gre_s_present = MLX5_GET(fte_match_set_misc, mask, gre_s_present);
	spec->source_vhca_port = MLX5_GET(fte_match_set_misc, mask, source_vhca_port);
	spec->source_sqn = MLX5_GET(fte_match_set_misc, mask, source_sqn);

	spec->source_port = MLX5_GET(fte_match_set_misc, mask, source_port);
	spec->source_eswitch_owner_vhca_id = MLX5_GET(fte_match_set_misc, mask,
						      source_eswitch_owner_vhca_id);

	spec->outer_second_prio = MLX5_GET(fte_match_set_misc, mask, outer_second_prio);
	spec->outer_second_cfi = MLX5_GET(fte_match_set_misc, mask, outer_second_cfi);
	spec->outer_second_vid = MLX5_GET(fte_match_set_misc, mask, outer_second_vid);
	spec->inner_second_prio = MLX5_GET(fte_match_set_misc, mask, inner_second_prio);
	spec->inner_second_cfi = MLX5_GET(fte_match_set_misc, mask, inner_second_cfi);
	spec->inner_second_vid = MLX5_GET(fte_match_set_misc, mask, inner_second_vid);

	spec->outer_second_cvlan_tag =
		MLX5_GET(fte_match_set_misc, mask, outer_second_cvlan_tag);
	spec->inner_second_cvlan_tag =
		MLX5_GET(fte_match_set_misc, mask, inner_second_cvlan_tag);
	spec->outer_second_svlan_tag =
		MLX5_GET(fte_match_set_misc, mask, outer_second_svlan_tag);
	spec->inner_second_svlan_tag =
		MLX5_GET(fte_match_set_misc, mask, inner_second_svlan_tag);

	spec->gre_protocol = MLX5_GET(fte_match_set_misc, mask, gre_protocol);

	spec->gre_key_h = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.hi);
	spec->gre_key_l = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.lo);

	spec->vxlan_vni = MLX5_GET(fte_match_set_misc, mask, vxlan_vni);

	spec->geneve_vni = MLX5_GET(fte_match_set_misc, mask, geneve_vni);
	spec->geneve_oam = MLX5_GET(fte_match_set_misc, mask, geneve_oam);

	spec->outer_ipv6_flow_label =
		MLX5_GET(fte_match_set_misc, mask, outer_ipv6_flow_label);

	spec->inner_ipv6_flow_label =
		MLX5_GET(fte_match_set_misc, mask, inner_ipv6_flow_label);

	spec->geneve_opt_len = MLX5_GET(fte_match_set_misc, mask, geneve_opt_len);
	spec->geneve_protocol_type =
		MLX5_GET(fte_match_set_misc, mask, geneve_protocol_type);

	spec->bth_dst_qp = MLX5_GET(fte_match_set_misc, mask, bth_dst_qp);
}
721
/* Unpack the fte_match_set_lyr_2_4 mask buffer into @spec. IPv6/IPv4
 * addresses are read as four big-endian dwords and stored host-endian.
 */
static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec)
{
	__be32 raw_ip[4];

	spec->smac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_47_16);

	spec->smac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_15_0);
	spec->ethertype = MLX5_GET(fte_match_set_lyr_2_4, mask, ethertype);

	spec->dmac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_47_16);

	spec->dmac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_15_0);
	spec->first_prio = MLX5_GET(fte_match_set_lyr_2_4, mask, first_prio);
	spec->first_cfi = MLX5_GET(fte_match_set_lyr_2_4, mask, first_cfi);
	spec->first_vid = MLX5_GET(fte_match_set_lyr_2_4, mask, first_vid);

	spec->ip_protocol = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_protocol);
	spec->ip_dscp = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_dscp);
	spec->ip_ecn = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_ecn);
	spec->cvlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, cvlan_tag);
	spec->svlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, svlan_tag);
	spec->frag = MLX5_GET(fte_match_set_lyr_2_4, mask, frag);
	spec->ip_version = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_version);
	spec->tcp_flags = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_flags);
	spec->tcp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_sport);
	spec->tcp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_dport);

	spec->ttl_hoplimit = MLX5_GET(fte_match_set_lyr_2_4, mask, ttl_hoplimit);

	spec->udp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_sport);
	spec->udp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_dport);

	memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
	       sizeof(raw_ip));

	spec->src_ip_127_96 = be32_to_cpu(raw_ip[0]);
	spec->src_ip_95_64 = be32_to_cpu(raw_ip[1]);
	spec->src_ip_63_32 = be32_to_cpu(raw_ip[2]);
	spec->src_ip_31_0 = be32_to_cpu(raw_ip[3]);

	memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
	       sizeof(raw_ip));

	spec->dst_ip_127_96 = be32_to_cpu(raw_ip[0]);
	spec->dst_ip_95_64 = be32_to_cpu(raw_ip[1]);
	spec->dst_ip_63_32 = be32_to_cpu(raw_ip[2]);
	spec->dst_ip_31_0 = be32_to_cpu(raw_ip[3]);
}
772
/* Unpack the fte_match_set_misc2 mask buffer into @spec (MPLS headers
 * and metadata registers).
 */
static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec)
{
	spec->outer_first_mpls_label =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_label);
	spec->outer_first_mpls_exp =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_exp);
	spec->outer_first_mpls_s_bos =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_s_bos);
	spec->outer_first_mpls_ttl =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_ttl);
	spec->inner_first_mpls_label =
		MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_label);
	spec->inner_first_mpls_exp =
		MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_exp);
	spec->inner_first_mpls_s_bos =
		MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_s_bos);
	spec->inner_first_mpls_ttl =
		MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_ttl);
	spec->outer_first_mpls_over_gre_label =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_label);
	spec->outer_first_mpls_over_gre_exp =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_exp);
	spec->outer_first_mpls_over_gre_s_bos =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_s_bos);
	spec->outer_first_mpls_over_gre_ttl =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_ttl);
	spec->outer_first_mpls_over_udp_label =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_label);
	spec->outer_first_mpls_over_udp_exp =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_exp);
	spec->outer_first_mpls_over_udp_s_bos =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_s_bos);
	spec->outer_first_mpls_over_udp_ttl =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_ttl);
	spec->metadata_reg_c_7 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_7);
	spec->metadata_reg_c_6 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_6);
	spec->metadata_reg_c_5 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_5);
	spec->metadata_reg_c_4 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_4);
	spec->metadata_reg_c_3 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_3);
	spec->metadata_reg_c_2 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_2);
	spec->metadata_reg_c_1 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_1);
	spec->metadata_reg_c_0 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_0);
	spec->metadata_reg_a = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_a);
}
817
/* Unpack the fte_match_set_misc3 mask buffer into @spec (TCP seq/ack,
 * VXLAN-GPE, ICMP, GENEVE TLV options and GTP-U fields).
 */
static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec)
{
	spec->inner_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_seq_num);
	spec->outer_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_seq_num);
	spec->inner_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_ack_num);
	spec->outer_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_ack_num);
	spec->outer_vxlan_gpe_vni =
		MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_vni);
	spec->outer_vxlan_gpe_next_protocol =
		MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_next_protocol);
	spec->outer_vxlan_gpe_flags =
		MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_flags);
	spec->icmpv4_header_data = MLX5_GET(fte_match_set_misc3, mask, icmp_header_data);
	spec->icmpv6_header_data =
		MLX5_GET(fte_match_set_misc3, mask, icmpv6_header_data);
	spec->icmpv4_type = MLX5_GET(fte_match_set_misc3, mask, icmp_type);
	spec->icmpv4_code = MLX5_GET(fte_match_set_misc3, mask, icmp_code);
	spec->icmpv6_type = MLX5_GET(fte_match_set_misc3, mask, icmpv6_type);
	spec->icmpv6_code = MLX5_GET(fte_match_set_misc3, mask, icmpv6_code);
	spec->geneve_tlv_option_0_data =
		MLX5_GET(fte_match_set_misc3, mask, geneve_tlv_option_0_data);
	spec->gtpu_msg_flags = MLX5_GET(fte_match_set_misc3, mask, gtpu_msg_flags);
	spec->gtpu_msg_type = MLX5_GET(fte_match_set_misc3, mask, gtpu_msg_type);
	spec->gtpu_teid = MLX5_GET(fte_match_set_misc3, mask, gtpu_teid);
	spec->gtpu_dw_0 = MLX5_GET(fte_match_set_misc3, mask, gtpu_dw_0);
	spec->gtpu_dw_2 = MLX5_GET(fte_match_set_misc3, mask, gtpu_dw_2);
	spec->gtpu_first_ext_dw_0 =
		MLX5_GET(fte_match_set_misc3, mask, gtpu_first_ext_dw_0);
}
847
/* Unpack the fte_match_set_misc4 mask buffer into @spec (programmable
 * sample field id/value pairs 0-3).
 */
static void dr_ste_copy_mask_misc4(char *mask, struct mlx5dr_match_misc4 *spec)
{
	spec->prog_sample_field_id_0 =
		MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_0);
	spec->prog_sample_field_value_0 =
		MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_0);
	spec->prog_sample_field_id_1 =
		MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_1);
	spec->prog_sample_field_value_1 =
		MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_1);
	spec->prog_sample_field_id_2 =
		MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_2);
	spec->prog_sample_field_value_2 =
		MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_2);
	spec->prog_sample_field_id_3 =
		MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_3);
	spec->prog_sample_field_value_3 =
		MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_3);
}
867
868void mlx5dr_ste_copy_param(u8 match_criteria,
869 struct mlx5dr_match_param *set_param,
870 struct mlx5dr_match_parameters *mask)
871{
872 u8 tail_param[MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)] = {};
873 u8 *data = (u8 *)mask->match_buf;
874 size_t param_location;
875 void *buff;
876
877 if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
878 if (mask->match_sz < sizeof(struct mlx5dr_match_spec)) {
879 memcpy(tail_param, data, mask->match_sz);
880 buff = tail_param;
881 } else {
882 buff = mask->match_buf;
883 }
884 dr_ste_copy_mask_spec(buff, &set_param->outer);
885 }
886 param_location = sizeof(struct mlx5dr_match_spec);
887
888 if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
889 if (mask->match_sz < param_location +
890 sizeof(struct mlx5dr_match_misc)) {
891 memcpy(tail_param, data + param_location,
892 mask->match_sz - param_location);
893 buff = tail_param;
894 } else {
895 buff = data + param_location;
896 }
897 dr_ste_copy_mask_misc(buff, &set_param->misc);
898 }
899 param_location += sizeof(struct mlx5dr_match_misc);
900
901 if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
902 if (mask->match_sz < param_location +
903 sizeof(struct mlx5dr_match_spec)) {
904 memcpy(tail_param, data + param_location,
905 mask->match_sz - param_location);
906 buff = tail_param;
907 } else {
908 buff = data + param_location;
909 }
910 dr_ste_copy_mask_spec(buff, &set_param->inner);
911 }
912 param_location += sizeof(struct mlx5dr_match_spec);
913
914 if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
915 if (mask->match_sz < param_location +
916 sizeof(struct mlx5dr_match_misc2)) {
917 memcpy(tail_param, data + param_location,
918 mask->match_sz - param_location);
919 buff = tail_param;
920 } else {
921 buff = data + param_location;
922 }
923 dr_ste_copy_mask_misc2(buff, &set_param->misc2);
924 }
925
926 param_location += sizeof(struct mlx5dr_match_misc2);
927
928 if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
929 if (mask->match_sz < param_location +
930 sizeof(struct mlx5dr_match_misc3)) {
931 memcpy(tail_param, data + param_location,
932 mask->match_sz - param_location);
933 buff = tail_param;
934 } else {
935 buff = data + param_location;
936 }
937 dr_ste_copy_mask_misc3(buff, &set_param->misc3);
938 }
939
940 param_location += sizeof(struct mlx5dr_match_misc3);
941
942 if (match_criteria & DR_MATCHER_CRITERIA_MISC4) {
943 if (mask->match_sz < param_location +
944 sizeof(struct mlx5dr_match_misc4)) {
945 memcpy(tail_param, data + param_location,
946 mask->match_sz - param_location);
947 buff = tail_param;
948 } else {
949 buff = data + param_location;
950 }
951 dr_ste_copy_mask_misc4(buff, &set_param->misc4);
952 }
953}
954
/* Populate builder @sb for the ETH L2 src/dst lookup via the format ops. */
void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask,
				     bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_eth_l2_src_dst_init(sb, mask);
}
964
/* Populate builder @sb for the IPv6 destination lookup via the format ops. */
void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_ctx *ste_ctx,
				      struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_eth_l3_ipv6_dst_init(sb, mask);
}
974
/* Populate builder @sb for the IPv6 source lookup via the format ops. */
void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_ctx *ste_ctx,
				      struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_eth_l3_ipv6_src_init(sb, mask);
}
984
/* Populate builder @sb for the IPv4 5-tuple lookup via the format ops. */
void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_ctx *ste_ctx,
					  struct mlx5dr_ste_build *sb,
					  struct mlx5dr_match_param *mask,
					  bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_eth_l3_ipv4_5_tuple_init(sb, mask);
}
994
/* Populate builder @sb for the ETH L2 source lookup via the format ops. */
void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_eth_l2_src_init(sb, mask);
}
1004
/* Populate builder @sb for the ETH L2 destination lookup via the format ops. */
void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_eth_l2_dst_init(sb, mask);
}
1014
/* Populate builder @sb for the ETH L2 tunneling lookup via the format ops. */
void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask, bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_eth_l2_tnl_init(sb, mask);
}
1023
1024void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_ctx *ste_ctx,
1025 struct mlx5dr_ste_build *sb,
1026 struct mlx5dr_match_param *mask,
1027 bool inner, bool rx)
1028{
1029 sb->rx = rx;
1030 sb->inner = inner;
1031 ste_ctx->build_eth_l3_ipv4_misc_init(sb, mask);
1032}
1033
1034void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_ctx *ste_ctx,
1035 struct mlx5dr_ste_build *sb,
1036 struct mlx5dr_match_param *mask,
1037 bool inner, bool rx)
1038{
1039 sb->rx = rx;
1040 sb->inner = inner;
1041 ste_ctx->build_eth_ipv6_l3_l4_init(sb, mask);
1042}
1043
1044static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value,
1045 struct mlx5dr_ste_build *sb,
1046 u8 *tag)
1047{
1048 return 0;
1049}
1050
1051void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx)
1052{
1053 sb->rx = rx;
1054 sb->lu_type = MLX5DR_STE_LU_TYPE_DONT_CARE;
1055 sb->byte_mask = 0;
1056 sb->ste_build_tag_func = &dr_ste_build_empty_always_hit_tag;
1057}
1058
1059void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
1060 struct mlx5dr_ste_build *sb,
1061 struct mlx5dr_match_param *mask,
1062 bool inner, bool rx)
1063{
1064 sb->rx = rx;
1065 sb->inner = inner;
1066 ste_ctx->build_mpls_init(sb, mask);
1067}
1068
1069void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
1070 struct mlx5dr_ste_build *sb,
1071 struct mlx5dr_match_param *mask,
1072 bool inner, bool rx)
1073{
1074 sb->rx = rx;
1075 sb->inner = inner;
1076 ste_ctx->build_tnl_gre_init(sb, mask);
1077}
1078
1079void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx,
1080 struct mlx5dr_ste_build *sb,
1081 struct mlx5dr_match_param *mask,
1082 struct mlx5dr_cmd_caps *caps,
1083 bool inner, bool rx)
1084{
1085 sb->rx = rx;
1086 sb->inner = inner;
1087 sb->caps = caps;
1088 return ste_ctx->build_tnl_mpls_over_gre_init(sb, mask);
1089}
1090
1091void mlx5dr_ste_build_tnl_mpls_over_udp(struct mlx5dr_ste_ctx *ste_ctx,
1092 struct mlx5dr_ste_build *sb,
1093 struct mlx5dr_match_param *mask,
1094 struct mlx5dr_cmd_caps *caps,
1095 bool inner, bool rx)
1096{
1097 sb->rx = rx;
1098 sb->inner = inner;
1099 sb->caps = caps;
1100 return ste_ctx->build_tnl_mpls_over_udp_init(sb, mask);
1101}
1102
1103void mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
1104 struct mlx5dr_ste_build *sb,
1105 struct mlx5dr_match_param *mask,
1106 struct mlx5dr_cmd_caps *caps,
1107 bool inner, bool rx)
1108{
1109 sb->rx = rx;
1110 sb->inner = inner;
1111 sb->caps = caps;
1112 ste_ctx->build_icmp_init(sb, mask);
1113}
1114
1115void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
1116 struct mlx5dr_ste_build *sb,
1117 struct mlx5dr_match_param *mask,
1118 bool inner, bool rx)
1119{
1120 sb->rx = rx;
1121 sb->inner = inner;
1122 ste_ctx->build_general_purpose_init(sb, mask);
1123}
1124
1125void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_ctx *ste_ctx,
1126 struct mlx5dr_ste_build *sb,
1127 struct mlx5dr_match_param *mask,
1128 bool inner, bool rx)
1129{
1130 sb->rx = rx;
1131 sb->inner = inner;
1132 ste_ctx->build_eth_l4_misc_init(sb, mask);
1133}
1134
1135void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx,
1136 struct mlx5dr_ste_build *sb,
1137 struct mlx5dr_match_param *mask,
1138 bool inner, bool rx)
1139{
1140 sb->rx = rx;
1141 sb->inner = inner;
1142 ste_ctx->build_tnl_vxlan_gpe_init(sb, mask);
1143}
1144
1145void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
1146 struct mlx5dr_ste_build *sb,
1147 struct mlx5dr_match_param *mask,
1148 bool inner, bool rx)
1149{
1150 sb->rx = rx;
1151 sb->inner = inner;
1152 ste_ctx->build_tnl_geneve_init(sb, mask);
1153}
1154
1155void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx,
1156 struct mlx5dr_ste_build *sb,
1157 struct mlx5dr_match_param *mask,
1158 struct mlx5dr_cmd_caps *caps,
1159 bool inner, bool rx)
1160{
1161 sb->rx = rx;
1162 sb->caps = caps;
1163 sb->inner = inner;
1164 ste_ctx->build_tnl_geneve_tlv_opt_init(sb, mask);
1165}
1166
1167void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx,
1168 struct mlx5dr_ste_build *sb,
1169 struct mlx5dr_match_param *mask,
1170 bool inner, bool rx)
1171{
1172 sb->rx = rx;
1173 sb->inner = inner;
1174 ste_ctx->build_tnl_gtpu_init(sb, mask);
1175}
1176
1177void mlx5dr_ste_build_tnl_gtpu_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
1178 struct mlx5dr_ste_build *sb,
1179 struct mlx5dr_match_param *mask,
1180 struct mlx5dr_cmd_caps *caps,
1181 bool inner, bool rx)
1182{
1183 sb->rx = rx;
1184 sb->caps = caps;
1185 sb->inner = inner;
1186 ste_ctx->build_tnl_gtpu_flex_parser_0_init(sb, mask);
1187}
1188
1189void mlx5dr_ste_build_tnl_gtpu_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
1190 struct mlx5dr_ste_build *sb,
1191 struct mlx5dr_match_param *mask,
1192 struct mlx5dr_cmd_caps *caps,
1193 bool inner, bool rx)
1194{
1195 sb->rx = rx;
1196 sb->caps = caps;
1197 sb->inner = inner;
1198 ste_ctx->build_tnl_gtpu_flex_parser_1_init(sb, mask);
1199}
1200
1201void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
1202 struct mlx5dr_ste_build *sb,
1203 struct mlx5dr_match_param *mask,
1204 bool inner, bool rx)
1205{
1206 sb->rx = rx;
1207 sb->inner = inner;
1208 ste_ctx->build_register_0_init(sb, mask);
1209}
1210
1211void mlx5dr_ste_build_register_1(struct mlx5dr_ste_ctx *ste_ctx,
1212 struct mlx5dr_ste_build *sb,
1213 struct mlx5dr_match_param *mask,
1214 bool inner, bool rx)
1215{
1216 sb->rx = rx;
1217 sb->inner = inner;
1218 ste_ctx->build_register_1_init(sb, mask);
1219}
1220
1221void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
1222 struct mlx5dr_ste_build *sb,
1223 struct mlx5dr_match_param *mask,
1224 struct mlx5dr_domain *dmn,
1225 bool inner, bool rx)
1226{
1227
1228 sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id;
1229
1230 sb->rx = rx;
1231 sb->dmn = dmn;
1232 sb->inner = inner;
1233 ste_ctx->build_src_gvmi_qpn_init(sb, mask);
1234}
1235
1236void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
1237 struct mlx5dr_ste_build *sb,
1238 struct mlx5dr_match_param *mask,
1239 bool inner, bool rx)
1240{
1241 sb->rx = rx;
1242 sb->inner = inner;
1243 ste_ctx->build_flex_parser_0_init(sb, mask);
1244}
1245
1246void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
1247 struct mlx5dr_ste_build *sb,
1248 struct mlx5dr_match_param *mask,
1249 bool inner, bool rx)
1250{
1251 sb->rx = rx;
1252 sb->inner = inner;
1253 ste_ctx->build_flex_parser_1_init(sb, mask);
1254}
1255
/* STE context per steering format version, indexed by the device's
 * sw_format_ver capability (see mlx5dr_ste_get_ctx()).
 */
static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = {
	[MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0,
	[MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1,
};
1260
1261struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
1262{
1263 if (version > MLX5_STEERING_FORMAT_CONNECTX_6DX)
1264 return NULL;
1265
1266 return mlx5dr_ste_ctx_arr[version];
1267}
1268