1
2
3
4#include <linux/types.h>
5#include <linux/crc32.h>
6#include "dr_ste.h"
7
/* In-memory layout of a single HW STE (Steering Table Entry):
 * a control section followed by the match tag and the per-entry bit mask.
 */
struct dr_hw_ste_format {
	u8 ctrl[DR_STE_SIZE_CTRL];
	u8 tag[DR_STE_SIZE_TAG];
	u8 mask[DR_STE_SIZE_MASK];
};
13
14static u32 dr_ste_crc32_calc(const void *input_data, size_t length)
15{
16 u32 crc = crc32(0, input_data, length);
17
18 return (__force u32)htonl(crc);
19}
20
/* TTL modification with checksum recalculation is only supported on devices
 * whose steering format version is newer than ConnectX-5.
 */
bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps)
{
	return caps->sw_format_ver > MLX5_STEERING_FORMAT_CONNECTX_5;
}
25
26u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
27{
28 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
29 u8 masked[DR_STE_SIZE_TAG] = {};
30 u32 crc32, index;
31 u16 bit;
32 int i;
33
34
35 if (htbl->chunk->num_of_entries == 1 || htbl->byte_mask == 0)
36 return 0;
37
38
39 bit = 1 << (DR_STE_SIZE_TAG - 1);
40 for (i = 0; i < DR_STE_SIZE_TAG; i++) {
41 if (htbl->byte_mask & bit)
42 masked[i] = hw_ste->tag[i];
43
44 bit = bit >> 1;
45 }
46
47 crc32 = dr_ste_crc32_calc(masked, DR_STE_SIZE_TAG);
48 index = crc32 & (htbl->chunk->num_of_entries - 1);
49
50 return index;
51}
52
53u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
54{
55 u16 byte_mask = 0;
56 int i;
57
58 for (i = 0; i < DR_STE_SIZE_MASK; i++) {
59 byte_mask = byte_mask << 1;
60 if (bit_mask[i] == 0xff)
61 byte_mask |= 1;
62 }
63 return byte_mask;
64}
65
/* Return a pointer to the tag section of a raw HW STE buffer. */
static u8 *dr_ste_get_tag(u8 *hw_ste_p)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;

	return hw_ste->tag;
}
72
/* Copy @bit_mask into the mask section of a raw HW STE buffer. */
void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;

	memcpy(hw_ste->mask, bit_mask, DR_STE_SIZE_MASK);
}
79
/* Clear both tag and mask so that every packet matches this STE
 * (zeroed mask with a zeroed tag compares equal for any input).
 */
static void dr_ste_set_always_hit(struct dr_hw_ste_format *hw_ste)
{
	memset(&hw_ste->tag, 0, sizeof(hw_ste->tag));
	memset(&hw_ste->mask, 0, sizeof(hw_ste->mask));
}
85
/* Make the STE never match: with a zeroed mask every masked packet field
 * is 0, so a non-zero tag byte (0xdc here) can never compare equal and
 * the lookup always takes the miss path.
 * NOTE(review): 0xdc looks like an arbitrary non-zero marker value —
 * confirm against the STE format documentation.
 */
static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste)
{
	hw_ste->tag[0] = 0xdc;
	hw_ste->mask[0] = 0;
}
91
/* Set the miss address of a raw HW STE via the format-specific callback. */
void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
			      u8 *hw_ste_p, u64 miss_addr)
{
	ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
}
97
/* Turn @ste into an entry that never matches and always forwards to
 * @miss_addr: set a don't-care next lookup type, program the miss address,
 * and write the always-miss tag/mask pattern.
 */
static void dr_ste_always_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
				    struct mlx5dr_ste *ste, u64 miss_addr)
{
	u8 *hw_ste_p = ste->hw_ste;

	ste_ctx->set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
	ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
	dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
}
107
/* Set the hit (next table) address and size of a raw HW STE via the
 * format-specific callback.
 */
void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
			     u8 *hw_ste, u64 icm_addr, u32 ht_size)
{
	ste_ctx->set_hit_addr(hw_ste, icm_addr, ht_size);
}
113
/* ICM address of @ste, derived from its index within the owning table's
 * STE array and the table chunk's base ICM address.
 */
u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
{
	u32 index = ste - ste->htbl->ste_arr;

	return ste->htbl->chunk->icm_addr + DR_STE_SIZE * index;
}
120
/* MR (memory region) address of @ste, derived the same way as the ICM
 * address but against the chunk's MR base.
 */
u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste)
{
	u32 index = ste - ste->htbl->ste_arr;

	return ste->htbl->chunk->mr_addr + DR_STE_SIZE * index;
}
127
/* Return the miss (collision) list head associated with @ste's slot in its
 * hash table.
 */
struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste)
{
	u32 index = ste - ste->htbl->ste_arr;

	return &ste->htbl->miss_list[index];
}
134
/* Turn @ste into an entry that always matches and forwards every packet to
 * @next_htbl: program the next table's byte mask, lookup type and hit
 * address, then clear tag/mask so any packet hits.
 */
static void dr_ste_always_hit_htbl(struct mlx5dr_ste_ctx *ste_ctx,
				   struct mlx5dr_ste *ste,
				   struct mlx5dr_ste_htbl *next_htbl)
{
	struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
	u8 *hw_ste = ste->hw_ste;

	ste_ctx->set_byte_mask(hw_ste, next_htbl->byte_mask);
	ste_ctx->set_next_lu_type(hw_ste, next_htbl->lu_type);
	ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);

	dr_ste_set_always_hit((struct dr_hw_ste_format *)ste->hw_ste);
}
148
/* True when @ste_location is the last builder position of the matcher's
 * STE chain (locations appear to be 1-based — the last equals
 * num_of_builders).
 */
bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
				u8 ste_location)
{
	return ste_location == nic_matcher->num_of_builders;
}
154
155
156
157
158
159
160
161
162
163
164
165
166
/* Copy @src's reduced HW STE content, next-table link and refcount into
 * @dst, and repoint the next table (if any) back at @dst.
 */
static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
{
	memcpy(dst->hw_ste, src->hw_ste, DR_STE_SIZE_REDUCED);
	dst->next_htbl = src->next_htbl;
	if (dst->next_htbl)
		dst->next_htbl->pointing_ste = dst;

	dst->refcount = src->refcount;
}
176
177
/* Free the head STE of a miss list when it is the only entry.
 * The head STE occupies a hash-table slot, so it cannot simply be deleted;
 * instead it is rewritten as an always-miss entry pointing at the matcher's
 * end anchor, and the full-size rewritten STE is queued for transmission
 * to HW.
 */
static void
dr_ste_remove_head_ste(struct mlx5dr_ste_ctx *ste_ctx,
		       struct mlx5dr_ste *ste,
		       struct mlx5dr_matcher_rx_tx *nic_matcher,
		       struct mlx5dr_ste_send_info *ste_info_head,
		       struct list_head *send_ste_list,
		       struct mlx5dr_ste_htbl *stats_tbl)
{
	u8 tmp_data_ste[DR_STE_SIZE] = {};
	struct mlx5dr_ste tmp_ste = {};
	u64 miss_addr;

	tmp_ste.hw_ste = tmp_data_ste;

	/* Build the replacement in a temporary full-size buffer first,
	 * then copy the reduced portion back into the table's STE.
	 */
	memcpy(tmp_ste.hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
	miss_addr = nic_matcher->e_anchor->chunk->icm_addr;
	dr_ste_always_miss_addr(ste_ctx, &tmp_ste, miss_addr);
	memcpy(ste->hw_ste, tmp_ste.hw_ste, DR_STE_SIZE_REDUCED);

	list_del_init(&ste->miss_list_node);

	/* Write full STE size in order to have "always_miss" */
	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
						  0, tmp_data_ste,
						  ste_info_head,
						  send_ste_list,
						  true /* Copy data */);

	stats_tbl->ctrl.num_of_valid_entries--;
}
211
212
213
214
/* Free the head STE of a miss list by promoting @next_ste into the head's
 * hash-table slot, then queue the updated head for transmission to HW.
 */
static void
dr_ste_replace_head_ste(struct mlx5dr_matcher_rx_tx *nic_matcher,
			struct mlx5dr_ste *ste,
			struct mlx5dr_ste *next_ste,
			struct mlx5dr_ste_send_info *ste_info_head,
			struct list_head *send_ste_list,
			struct mlx5dr_ste_htbl *stats_tbl)

{
	struct mlx5dr_ste_htbl *next_miss_htbl;
	u8 hw_ste[DR_STE_SIZE] = {};
	int sb_idx;

	next_miss_htbl = next_ste->htbl;

	/* Remove from the miss_list the next_ste before copy */
	list_del_init(&next_ste->miss_list_node);

	/* Move data from next into ste */
	dr_ste_replace(ste, next_ste);

	/* Update the rule on STE change */
	mlx5dr_rule_set_last_member(next_ste->rule_rx_tx, ste, false);

	/* Copy all 64 hw_ste bytes */
	memcpy(hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
	sb_idx = ste->ste_chain_location - 1;
	/* Re-apply this chain location's bit mask — the reduced copy above
	 * does not carry the mask section.
	 */
	mlx5dr_ste_set_bit_mask(hw_ste,
				nic_matcher->ste_builder[sb_idx].bit_mask);

	/* Del the htbl that contains the next_ste.
	 * The origin htbl stays the same- the rest of the list still
	 * points to it.
	 */
	mlx5dr_htbl_put(next_miss_htbl);

	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
						  0, hw_ste,
						  ste_info_head,
						  send_ste_list,
						  true /* Copy data */);

	stats_tbl->ctrl.num_of_collisions--;
	stats_tbl->ctrl.num_of_valid_entries--;
}
259
260
261
262
/* Free a non-head STE of a miss list: bypass it by copying its miss
 * address into the previous list member, queue the previous STE's control
 * section for transmission to HW, then unlink the entry.
 */
static void dr_ste_remove_middle_ste(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste *ste,
				     struct mlx5dr_ste_send_info *ste_info,
				     struct list_head *send_ste_list,
				     struct mlx5dr_ste_htbl *stats_tbl)
{
	struct mlx5dr_ste *prev_ste;
	u64 miss_addr;

	prev_ste = list_prev_entry(ste, miss_list_node);
	if (WARN_ON(!prev_ste))
		return;

	miss_addr = ste_ctx->get_miss_addr(ste->hw_ste);
	ste_ctx->set_miss_addr(prev_ste->hw_ste, miss_addr);

	/* Only the control section changed, so only it is sent */
	mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_CTRL, 0,
						  prev_ste->hw_ste, ste_info,
						  send_ste_list, true /* Copy data*/);

	list_del_init(&ste->miss_list_node);

	stats_tbl->ctrl.num_of_valid_entries--;
	stats_tbl->ctrl.num_of_collisions--;
}
288
/* Free an STE from its miss list, updating HW as needed.
 * Three cases are handled depending on the STE's position in the list:
 * sole head entry (rewritten as always-miss), head with successors
 * (successor promoted into the head slot), or middle/tail entry (bypassed
 * by its predecessor). Queued HW writes are then flushed, and the origin
 * hash table reference is dropped unless the head slot was reused.
 */
void mlx5dr_ste_free(struct mlx5dr_ste *ste,
		     struct mlx5dr_matcher *matcher,
		     struct mlx5dr_matcher_rx_tx *nic_matcher)
{
	struct mlx5dr_ste_send_info *cur_ste_info, *tmp_ste_info;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_send_info ste_info_head;
	struct mlx5dr_ste *next_ste, *first_ste;
	bool put_on_origin_table = true;
	struct mlx5dr_ste_htbl *stats_tbl;
	LIST_HEAD(send_ste_list);

	first_ste = list_first_entry(mlx5dr_ste_get_miss_list(ste),
				     struct mlx5dr_ste, miss_list_node);
	stats_tbl = first_ste->htbl;

	/* Two options:
	 * 1. ste is head:
	 *	a. head ste is the only ste in the miss list
	 *	b. head ste is not the only ste in the miss-list
	 * 2. ste is not head
	 */
	if (first_ste == ste) { /* Ste is the head */
		struct mlx5dr_ste *last_ste;

		last_ste = list_last_entry(mlx5dr_ste_get_miss_list(ste),
					   struct mlx5dr_ste, miss_list_node);
		if (last_ste == first_ste)
			next_ste = NULL;
		else
			next_ste = list_next_entry(ste, miss_list_node);

		if (!next_ste) {
			/* One and only entry in the list */
			dr_ste_remove_head_ste(ste_ctx, ste,
					       nic_matcher,
					       &ste_info_head,
					       &send_ste_list,
					       stats_tbl);
		} else {
			/* First but not only entry in the list */
			dr_ste_replace_head_ste(nic_matcher, ste,
						next_ste, &ste_info_head,
						&send_ste_list, stats_tbl);
			put_on_origin_table = false;
		}
	} else { /* Ste in the middle of the list */
		dr_ste_remove_middle_ste(ste_ctx, ste,
					 &ste_info_head, &send_ste_list,
					 stats_tbl);
	}

	/* Update HW */
	list_for_each_entry_safe(cur_ste_info, tmp_ste_info,
				 &send_ste_list, send_list) {
		list_del(&cur_ste_info->send_list);
		mlx5dr_send_postsend_ste(dmn, cur_ste_info->ste,
					 cur_ste_info->data, cur_ste_info->size,
					 cur_ste_info->offset);
	}

	if (put_on_origin_table)
		mlx5dr_htbl_put(ste->htbl);
}
354
355bool mlx5dr_ste_equal_tag(void *src, void *dst)
356{
357 struct dr_hw_ste_format *s_hw_ste = (struct dr_hw_ste_format *)src;
358 struct dr_hw_ste_format *d_hw_ste = (struct dr_hw_ste_format *)dst;
359
360 return !memcmp(s_hw_ste->tag, d_hw_ste->tag, DR_STE_SIZE_TAG);
361}
362
/* Point a raw HW STE's hit address at @next_htbl's ICM chunk. */
void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
					  u8 *hw_ste,
					  struct mlx5dr_ste_htbl *next_htbl)
{
	struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;

	ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
}
371
/* Give the STE format a chance to fix up the raw STE before it is posted
 * to HW; a no-op for formats that don't provide the hook.
 */
void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
				     u8 *hw_ste_p, u32 ste_size)
{
	if (ste_ctx->prepare_for_postsend)
		ste_ctx->prepare_for_postsend(hw_ste_p, ste_size);
}
378
379
/* Init one ste as a pattern for ste data array */
void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
				  u16 gvmi,
				  enum mlx5dr_domain_nic_type nic_type,
				  struct mlx5dr_ste_htbl *htbl,
				  u8 *formatted_ste,
				  struct mlx5dr_htbl_connect_info *connect_info)
{
	bool is_rx = nic_type == DR_DOMAIN_NIC_TYPE_RX;
	struct mlx5dr_ste ste = {};

	ste_ctx->ste_init(formatted_ste, htbl->lu_type, is_rx, gvmi);
	ste.hw_ste = formatted_ste;

	/* Connect the formatted STE either as always-hit to the next table
	 * or as always-miss to the given ICM address.
	 */
	if (connect_info->type == CONNECT_HIT)
		dr_ste_always_hit_htbl(ste_ctx, &ste, connect_info->hit_next_htbl);
	else
		dr_ste_always_miss_addr(ste_ctx, &ste, connect_info->miss_icm_addr);
}
398
/* Format a template STE for @htbl according to @connect_info and post the
 * whole table to HW. Returns 0 on success or a negative errno from the
 * send path.
 */
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
				      struct mlx5dr_domain_rx_tx *nic_dmn,
				      struct mlx5dr_ste_htbl *htbl,
				      struct mlx5dr_htbl_connect_info *connect_info,
				      bool update_hw_ste)
{
	u8 formatted_ste[DR_STE_SIZE] = {};

	mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
				     dmn->info.caps.gvmi,
				     nic_dmn->type,
				     htbl,
				     formatted_ste,
				     connect_info);

	return mlx5dr_send_postsend_formatted_htbl(dmn, htbl, formatted_ste, update_hw_ste);
}
416
/* Allocate and connect the next hash table in the rule's STE chain, unless
 * @ste is the last STE of the rule. The new table is first written to HW
 * connected as always-miss to the matcher's end anchor, then @cur_hw_ste's
 * hit address is pointed at it.
 * Returns 0 on success, -ENOMEM on allocation failure, -ENOENT if writing
 * the new table to HW failed.
 */
int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
				struct mlx5dr_matcher_rx_tx *nic_matcher,
				struct mlx5dr_ste *ste,
				u8 *cur_hw_ste,
				enum mlx5dr_icm_chunk_size log_table_size)
{
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_htbl_connect_info info;
	struct mlx5dr_ste_htbl *next_htbl;

	if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
		u16 next_lu_type;
		u16 byte_mask;

		/* The next table inherits lookup type and byte mask from the
		 * current STE's control section.
		 */
		next_lu_type = ste_ctx->get_next_lu_type(cur_hw_ste);
		byte_mask = ste_ctx->get_byte_mask(cur_hw_ste);

		next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
						  log_table_size,
						  next_lu_type,
						  byte_mask);
		if (!next_htbl) {
			mlx5dr_dbg(dmn, "Failed allocating table\n");
			return -ENOMEM;
		}

		/* Write new table to HW */
		info.type = CONNECT_MISS;
		info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
		if (mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, next_htbl,
						      &info, false)) {
			mlx5dr_info(dmn, "Failed writing table to HW\n");
			goto free_table;
		}

		mlx5dr_ste_set_hit_addr_by_next_htbl(ste_ctx,
						     cur_hw_ste, next_htbl);
		ste->next_htbl = next_htbl;
		next_htbl->pointing_ste = ste;
	}

	return 0;

free_table:
	mlx5dr_ste_htbl_free(next_htbl);
	return -ENOENT;
}
466
/* Allocate an STE hash table backed by an ICM chunk of @chunk_size.
 * Each entry's shadow hw_ste points into the chunk's reduced-size HW STE
 * array, and every entry starts with an empty miss list and a zero
 * refcount. Returns NULL on allocation failure.
 */
struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
					      enum mlx5dr_icm_chunk_size chunk_size,
					      u16 lu_type, u16 byte_mask)
{
	struct mlx5dr_icm_chunk *chunk;
	struct mlx5dr_ste_htbl *htbl;
	int i;

	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
	if (!htbl)
		return NULL;

	chunk = mlx5dr_icm_alloc_chunk(pool, chunk_size);
	if (!chunk)
		goto out_free_htbl;

	htbl->chunk = chunk;
	htbl->lu_type = lu_type;
	htbl->byte_mask = byte_mask;
	htbl->ste_arr = chunk->ste_arr;
	htbl->hw_ste_arr = chunk->hw_ste_arr;
	htbl->miss_list = chunk->miss_list;
	htbl->refcount = 0;

	for (i = 0; i < chunk->num_of_entries; i++) {
		struct mlx5dr_ste *ste = &htbl->ste_arr[i];

		/* Shadow copies are stored at the reduced STE stride */
		ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
		ste->htbl = htbl;
		ste->refcount = 0;
		INIT_LIST_HEAD(&ste->miss_list_node);
		INIT_LIST_HEAD(&htbl->miss_list[i]);
	}

	htbl->chunk_size = chunk_size;
	return htbl;

out_free_htbl:
	kfree(htbl);
	return NULL;
}
508
/* Free a hash table and its ICM chunk.
 * Returns -EBUSY without freeing anything if the table is still referenced.
 */
int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
{
	if (htbl->refcount)
		return -EBUSY;

	mlx5dr_icm_free_chunk(htbl->chunk);
	kfree(htbl);
	return 0;
}
518
/* Apply the TX action set to the STE array via the format-specific
 * callback; @added_stes is updated if extra STEs were needed.
 */
void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_domain *dmn,
			       u8 *action_type_set,
			       u8 *hw_ste_arr,
			       struct mlx5dr_ste_actions_attr *attr,
			       u32 *added_stes)
{
	ste_ctx->set_actions_tx(dmn, action_type_set, hw_ste_arr,
				attr, added_stes);
}
529
/* Apply the RX action set to the STE array via the format-specific
 * callback; @added_stes is updated if extra STEs were needed.
 */
void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_domain *dmn,
			       u8 *action_type_set,
			       u8 *hw_ste_arr,
			       struct mlx5dr_ste_actions_attr *attr,
			       u32 *added_stes)
{
	ste_ctx->set_actions_rx(dmn, action_type_set, hw_ste_arr,
				attr, added_stes);
}
540
541const struct mlx5dr_ste_action_modify_field *
542mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field)
543{
544 const struct mlx5dr_ste_action_modify_field *hw_field;
545
546 if (sw_field >= ste_ctx->modify_field_arr_sz)
547 return NULL;
548
549 hw_field = &ste_ctx->modify_field_arr[sw_field];
550 if (!hw_field->end && !hw_field->start)
551 return NULL;
552
553 return hw_field;
554}
555
/* Encode a modify-header "set" action into @hw_action via the
 * format-specific callback.
 */
void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx,
			       __be64 *hw_action,
			       u8 hw_field,
			       u8 shifter,
			       u8 length,
			       u32 data)
{
	ste_ctx->set_action_set((u8 *)hw_action,
				hw_field, shifter, length, data);
}
566
/* Encode a modify-header "add" action into @hw_action via the
 * format-specific callback.
 */
void mlx5dr_ste_set_action_add(struct mlx5dr_ste_ctx *ste_ctx,
			       __be64 *hw_action,
			       u8 hw_field,
			       u8 shifter,
			       u8 length,
			       u32 data)
{
	ste_ctx->set_action_add((u8 *)hw_action,
				hw_field, shifter, length, data);
}
577
/* Encode a modify-header "copy" action (src field -> dst field) into
 * @hw_action via the format-specific callback.
 */
void mlx5dr_ste_set_action_copy(struct mlx5dr_ste_ctx *ste_ctx,
				__be64 *hw_action,
				u8 dst_hw_field,
				u8 dst_shifter,
				u8 dst_len,
				u8 src_hw_field,
				u8 src_shifter)
{
	ste_ctx->set_action_copy((u8 *)hw_action,
				 dst_hw_field, dst_shifter, dst_len,
				 src_hw_field, src_shifter);
}
590
/* Build the HW action list for L3 decap with inner L2 rewrite.
 * @data must be exactly an L2 header, with or without a VLAN; any other
 * size is rejected with -EINVAL before reaching the format callback.
 */
int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
					void *data, u32 data_sz,
					u8 *hw_action, u32 hw_action_sz,
					u16 *used_hw_action_num)
{
	/* Only L2 header with or without a single VLAN is supported */
	if (data_sz != HDR_LEN_L2 && data_sz != HDR_LEN_L2_W_VLAN)
		return -EINVAL;

	return ste_ctx->set_action_decap_l3_list(data, data_sz,
						 hw_action, hw_action_sz,
						 used_hw_action_num);
}
604
/* Validate one match spec: IP address matching requires either a full
 * ip_version mask (0xf) or a full ethertype mask, since partial masks
 * cannot be expressed. Returns 0 if valid, -EINVAL otherwise.
 */
static int dr_ste_build_pre_check_spec(struct mlx5dr_domain *dmn,
				       struct mlx5dr_match_spec *spec)
{
	if (spec->ip_version) {
		if (spec->ip_version != 0xf) {
			mlx5dr_err(dmn,
				   "Partial ip_version mask with src/dst IP is not supported\n");
			return -EINVAL;
		}
	} else if (spec->ethertype != 0xffff &&
		   (DR_MASK_IS_SRC_IP_SET(spec) || DR_MASK_IS_DST_IP_SET(spec))) {
		mlx5dr_err(dmn,
			   "Partial/no ethertype mask with src/dst IP is not supported\n");
		return -EINVAL;
	}

	return 0;
}
623
/* Validate a match mask before building STEs. Only the mask is checked
 * (@value non-NULL means this call is for a concrete rule value and is
 * skipped). Partial masks on source_port / source_eswitch_owner_vhca_id
 * and invalid outer/inner IP specs are rejected with -EINVAL.
 */
int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
			       u8 match_criteria,
			       struct mlx5dr_match_param *mask,
			       struct mlx5dr_match_param *value)
{
	if (value)
		return 0;

	if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
		if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
			mlx5dr_err(dmn,
				   "Partial mask source_port is not supported\n");
			return -EINVAL;
		}
		if (mask->misc.source_eswitch_owner_vhca_id &&
		    mask->misc.source_eswitch_owner_vhca_id != 0xffff) {
			mlx5dr_err(dmn,
				   "Partial mask source_eswitch_owner_vhca_id is not supported\n");
			return -EINVAL;
		}
	}

	if ((match_criteria & DR_MATCHER_CRITERIA_OUTER) &&
	    dr_ste_build_pre_check_spec(dmn, &mask->outer))
		return -EINVAL;

	if ((match_criteria & DR_MATCHER_CRITERIA_INNER) &&
	    dr_ste_build_pre_check_spec(dmn, &mask->inner))
		return -EINVAL;

	return 0;
}
656
/* Build the rule's STE array from the matcher's builders and @value.
 * For each builder: init the STE, apply the builder's bit mask, and fill
 * the tag; each STE except the last is linked to its successor by copying
 * the next builder's lookup type and byte mask into its control section.
 * Returns 0 on success or a negative errno from pre-check/tag building.
 */
int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
			     struct mlx5dr_matcher_rx_tx *nic_matcher,
			     struct mlx5dr_match_param *value,
			     u8 *ste_arr)
{
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	bool is_rx = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_build *sb;
	int ret, i;

	ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
					 &matcher->mask, value);
	if (ret)
		return ret;

	sb = nic_matcher->ste_builder;
	for (i = 0; i < nic_matcher->num_of_builders; i++) {
		ste_ctx->ste_init(ste_arr,
				  sb->lu_type,
				  is_rx,
				  dmn->info.caps.gvmi);

		mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);

		ret = sb->ste_build_tag_func(value, sb, dr_ste_get_tag(ste_arr));
		if (ret)
			return ret;

		/* Connect the STEs */
		if (i < (nic_matcher->num_of_builders - 1)) {
			/* Need the next builder for these fields,
			 * not relevant for the last ste in the chain.
			 */
			sb++;
			ste_ctx->set_next_lu_type(ste_arr, sb->lu_type);
			ste_ctx->set_byte_mask(ste_arr, sb->byte_mask);
		}
		ste_arr += DR_STE_SIZE;
	}
	return 0;
}
700
/* Read field @fld of ifc-layout struct @typ from buffer @p; when @clear is
 * set, also zero the field in place so the consumed mask bits are cleared.
 */
#define IFC_GET_CLR(typ, p, fld, clear) ({ \
	void *__p = (p); \
	u32 __t = MLX5_GET(typ, __p, fld); \
	if (clear) \
		MLX5_SET(typ, __p, fld, 0); \
	__t; \
})
708
/* memcpy() @len bytes from @from to @to; when @clear is set, also zero the
 * source region (used to consume raw byte-array mask fields).
 */
#define memcpy_and_clear(to, from, len, clear) ({ \
	void *__to = (to), *__from = (from); \
	size_t __len = (len); \
	memcpy(__to, __from, __len); \
	if (clear) \
		memset(__from, 0, __len); \
})
716
/* Unpack an ifc-formatted fte_match_set_misc buffer into the SW-steering
 * misc match struct; when @clr is set each field is zeroed in @mask as it
 * is consumed.
 */
static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec, bool clr)
{
	spec->gre_c_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_c_present, clr);
	spec->gre_k_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_k_present, clr);
	spec->gre_s_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_s_present, clr);
	spec->source_vhca_port = IFC_GET_CLR(fte_match_set_misc, mask, source_vhca_port, clr);
	spec->source_sqn = IFC_GET_CLR(fte_match_set_misc, mask, source_sqn, clr);

	spec->source_port = IFC_GET_CLR(fte_match_set_misc, mask, source_port, clr);
	spec->source_eswitch_owner_vhca_id =
		IFC_GET_CLR(fte_match_set_misc, mask, source_eswitch_owner_vhca_id, clr);

	spec->outer_second_prio = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_prio, clr);
	spec->outer_second_cfi = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_cfi, clr);
	spec->outer_second_vid = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_vid, clr);
	spec->inner_second_prio = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_prio, clr);
	spec->inner_second_cfi = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_cfi, clr);
	spec->inner_second_vid = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_vid, clr);

	spec->outer_second_cvlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, outer_second_cvlan_tag, clr);
	spec->inner_second_cvlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, inner_second_cvlan_tag, clr);
	spec->outer_second_svlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, outer_second_svlan_tag, clr);
	spec->inner_second_svlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, inner_second_svlan_tag, clr);
	spec->gre_protocol = IFC_GET_CLR(fte_match_set_misc, mask, gre_protocol, clr);

	spec->gre_key_h = IFC_GET_CLR(fte_match_set_misc, mask, gre_key.nvgre.hi, clr);
	spec->gre_key_l = IFC_GET_CLR(fte_match_set_misc, mask, gre_key.nvgre.lo, clr);

	spec->vxlan_vni = IFC_GET_CLR(fte_match_set_misc, mask, vxlan_vni, clr);

	spec->geneve_vni = IFC_GET_CLR(fte_match_set_misc, mask, geneve_vni, clr);
	spec->geneve_tlv_option_0_exist =
		IFC_GET_CLR(fte_match_set_misc, mask, geneve_tlv_option_0_exist, clr);
	spec->geneve_oam = IFC_GET_CLR(fte_match_set_misc, mask, geneve_oam, clr);

	spec->outer_ipv6_flow_label =
		IFC_GET_CLR(fte_match_set_misc, mask, outer_ipv6_flow_label, clr);

	spec->inner_ipv6_flow_label =
		IFC_GET_CLR(fte_match_set_misc, mask, inner_ipv6_flow_label, clr);

	spec->geneve_opt_len = IFC_GET_CLR(fte_match_set_misc, mask, geneve_opt_len, clr);
	spec->geneve_protocol_type =
		IFC_GET_CLR(fte_match_set_misc, mask, geneve_protocol_type, clr);

	spec->bth_dst_qp = IFC_GET_CLR(fte_match_set_misc, mask, bth_dst_qp, clr);
}
768
/* Unpack an ifc-formatted fte_match_set_lyr_2_4 buffer into the SW-steering
 * match spec; when @clr is set each field is zeroed in @mask as it is
 * consumed. IPv6/IPv4 addresses are read as raw big-endian words and
 * converted to host order.
 */
static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec, bool clr)
{
	__be32 raw_ip[4];

	spec->smac_47_16 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, smac_47_16, clr);

	spec->smac_15_0 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, smac_15_0, clr);
	spec->ethertype = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ethertype, clr);

	spec->dmac_47_16 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, dmac_47_16, clr);

	spec->dmac_15_0 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, dmac_15_0, clr);
	spec->first_prio = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_prio, clr);
	spec->first_cfi = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_cfi, clr);
	spec->first_vid = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_vid, clr);

	spec->ip_protocol = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_protocol, clr);
	spec->ip_dscp = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_dscp, clr);
	spec->ip_ecn = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_ecn, clr);
	spec->cvlan_tag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, cvlan_tag, clr);
	spec->svlan_tag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, svlan_tag, clr);
	spec->frag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, frag, clr);
	spec->ip_version = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_version, clr);
	spec->tcp_flags = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_flags, clr);
	spec->tcp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_sport, clr);
	spec->tcp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_dport, clr);

	spec->ttl_hoplimit = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ttl_hoplimit, clr);

	spec->udp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_sport, clr);
	spec->udp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_dport, clr);

	memcpy_and_clear(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
					      src_ipv4_src_ipv6.ipv6_layout.ipv6),
			 sizeof(raw_ip), clr);

	spec->src_ip_127_96 = be32_to_cpu(raw_ip[0]);
	spec->src_ip_95_64 = be32_to_cpu(raw_ip[1]);
	spec->src_ip_63_32 = be32_to_cpu(raw_ip[2]);
	spec->src_ip_31_0 = be32_to_cpu(raw_ip[3]);

	memcpy_and_clear(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
					      dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
			 sizeof(raw_ip), clr);

	spec->dst_ip_127_96 = be32_to_cpu(raw_ip[0]);
	spec->dst_ip_95_64 = be32_to_cpu(raw_ip[1]);
	spec->dst_ip_63_32 = be32_to_cpu(raw_ip[2]);
	spec->dst_ip_31_0 = be32_to_cpu(raw_ip[3]);
}
819
/* Unpack an ifc-formatted fte_match_set_misc2 buffer (MPLS headers and
 * metadata registers) into the SW-steering misc2 match struct; when @clr is
 * set each field is zeroed in @mask as it is consumed.
 */
static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec, bool clr)
{
	spec->outer_first_mpls_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_label, clr);
	spec->outer_first_mpls_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_exp, clr);
	spec->outer_first_mpls_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_s_bos, clr);
	spec->outer_first_mpls_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_ttl, clr);
	spec->inner_first_mpls_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_label, clr);
	spec->inner_first_mpls_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_exp, clr);
	spec->inner_first_mpls_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_s_bos, clr);
	spec->inner_first_mpls_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_ttl, clr);
	spec->outer_first_mpls_over_gre_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_label, clr);
	spec->outer_first_mpls_over_gre_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_exp, clr);
	spec->outer_first_mpls_over_gre_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_s_bos, clr);
	spec->outer_first_mpls_over_gre_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_ttl, clr);
	spec->outer_first_mpls_over_udp_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_label, clr);
	spec->outer_first_mpls_over_udp_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_exp, clr);
	spec->outer_first_mpls_over_udp_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_s_bos, clr);
	spec->outer_first_mpls_over_udp_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_ttl, clr);
	spec->metadata_reg_c_7 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_7, clr);
	spec->metadata_reg_c_6 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_6, clr);
	spec->metadata_reg_c_5 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_5, clr);
	spec->metadata_reg_c_4 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_4, clr);
	spec->metadata_reg_c_3 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_3, clr);
	spec->metadata_reg_c_2 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_2, clr);
	spec->metadata_reg_c_1 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_1, clr);
	spec->metadata_reg_c_0 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_0, clr);
	spec->metadata_reg_a = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_a, clr);
}
864
/* Unpack an ifc-formatted fte_match_set_misc3 buffer (TCP seq/ack,
 * VXLAN-GPE, ICMP, Geneve TLV, GTP-U) into the SW-steering misc3 match
 * struct; when @clr is set each field is zeroed in @mask as it is consumed.
 */
static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec, bool clr)
{
	spec->inner_tcp_seq_num = IFC_GET_CLR(fte_match_set_misc3, mask, inner_tcp_seq_num, clr);
	spec->outer_tcp_seq_num = IFC_GET_CLR(fte_match_set_misc3, mask, outer_tcp_seq_num, clr);
	spec->inner_tcp_ack_num = IFC_GET_CLR(fte_match_set_misc3, mask, inner_tcp_ack_num, clr);
	spec->outer_tcp_ack_num = IFC_GET_CLR(fte_match_set_misc3, mask, outer_tcp_ack_num, clr);
	spec->outer_vxlan_gpe_vni =
		IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_vni, clr);
	spec->outer_vxlan_gpe_next_protocol =
		IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_next_protocol, clr);
	spec->outer_vxlan_gpe_flags =
		IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_flags, clr);
	spec->icmpv4_header_data = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_header_data, clr);
	spec->icmpv6_header_data =
		IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_header_data, clr);
	spec->icmpv4_type = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_type, clr);
	spec->icmpv4_code = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_code, clr);
	spec->icmpv6_type = IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_type, clr);
	spec->icmpv6_code = IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_code, clr);
	spec->geneve_tlv_option_0_data =
		IFC_GET_CLR(fte_match_set_misc3, mask, geneve_tlv_option_0_data, clr);
	spec->gtpu_teid = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_teid, clr);
	spec->gtpu_msg_flags = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_msg_flags, clr);
	spec->gtpu_msg_type = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_msg_type, clr);
	spec->gtpu_dw_0 = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_dw_0, clr);
	spec->gtpu_dw_2 = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_dw_2, clr);
	spec->gtpu_first_ext_dw_0 =
		IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_first_ext_dw_0, clr);
}
894
/* Unpack an ifc-formatted fte_match_set_misc4 buffer (programmable sample
 * fields) into the SW-steering misc4 match struct; when @clr is set each
 * field is zeroed in @mask as it is consumed.
 */
static void dr_ste_copy_mask_misc4(char *mask, struct mlx5dr_match_misc4 *spec, bool clr)
{
	spec->prog_sample_field_id_0 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_0, clr);
	spec->prog_sample_field_value_0 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_0, clr);
	spec->prog_sample_field_id_1 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_1, clr);
	spec->prog_sample_field_value_1 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_1, clr);
	spec->prog_sample_field_id_2 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_2, clr);
	spec->prog_sample_field_value_2 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_2, clr);
	spec->prog_sample_field_id_3 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_3, clr);
	spec->prog_sample_field_value_3 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_3, clr);
}
914
/* Unpack an ifc-formatted fte_match_set_misc5 buffer (MACsec tags and
 * tunnel headers) into the SW-steering misc5 match struct; when @clr is set
 * each field is zeroed in @mask as it is consumed.
 */
static void dr_ste_copy_mask_misc5(char *mask, struct mlx5dr_match_misc5 *spec, bool clr)
{
	spec->macsec_tag_0 =
		IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_0, clr);
	spec->macsec_tag_1 =
		IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_1, clr);
	spec->macsec_tag_2 =
		IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_2, clr);
	spec->macsec_tag_3 =
		IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_3, clr);
	spec->tunnel_header_0 =
		IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_0, clr);
	spec->tunnel_header_1 =
		IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_1, clr);
	spec->tunnel_header_2 =
		IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_2, clr);
	spec->tunnel_header_3 =
		IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_3, clr);
}
934
935void mlx5dr_ste_copy_param(u8 match_criteria,
936 struct mlx5dr_match_param *set_param,
937 struct mlx5dr_match_parameters *mask,
938 bool clr)
939{
940 u8 tail_param[MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)] = {};
941 u8 *data = (u8 *)mask->match_buf;
942 size_t param_location;
943 void *buff;
944
945 if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
946 if (mask->match_sz < sizeof(struct mlx5dr_match_spec)) {
947 memcpy(tail_param, data, mask->match_sz);
948 buff = tail_param;
949 } else {
950 buff = mask->match_buf;
951 }
952 dr_ste_copy_mask_spec(buff, &set_param->outer, clr);
953 }
954 param_location = sizeof(struct mlx5dr_match_spec);
955
956 if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
957 if (mask->match_sz < param_location +
958 sizeof(struct mlx5dr_match_misc)) {
959 memcpy(tail_param, data + param_location,
960 mask->match_sz - param_location);
961 buff = tail_param;
962 } else {
963 buff = data + param_location;
964 }
965 dr_ste_copy_mask_misc(buff, &set_param->misc, clr);
966 }
967 param_location += sizeof(struct mlx5dr_match_misc);
968
969 if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
970 if (mask->match_sz < param_location +
971 sizeof(struct mlx5dr_match_spec)) {
972 memcpy(tail_param, data + param_location,
973 mask->match_sz - param_location);
974 buff = tail_param;
975 } else {
976 buff = data + param_location;
977 }
978 dr_ste_copy_mask_spec(buff, &set_param->inner, clr);
979 }
980 param_location += sizeof(struct mlx5dr_match_spec);
981
982 if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
983 if (mask->match_sz < param_location +
984 sizeof(struct mlx5dr_match_misc2)) {
985 memcpy(tail_param, data + param_location,
986 mask->match_sz - param_location);
987 buff = tail_param;
988 } else {
989 buff = data + param_location;
990 }
991 dr_ste_copy_mask_misc2(buff, &set_param->misc2, clr);
992 }
993
994 param_location += sizeof(struct mlx5dr_match_misc2);
995
996 if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
997 if (mask->match_sz < param_location +
998 sizeof(struct mlx5dr_match_misc3)) {
999 memcpy(tail_param, data + param_location,
1000 mask->match_sz - param_location);
1001 buff = tail_param;
1002 } else {
1003 buff = data + param_location;
1004 }
1005 dr_ste_copy_mask_misc3(buff, &set_param->misc3, clr);
1006 }
1007
1008 param_location += sizeof(struct mlx5dr_match_misc3);
1009
1010 if (match_criteria & DR_MATCHER_CRITERIA_MISC4) {
1011 if (mask->match_sz < param_location +
1012 sizeof(struct mlx5dr_match_misc4)) {
1013 memcpy(tail_param, data + param_location,
1014 mask->match_sz - param_location);
1015 buff = tail_param;
1016 } else {
1017 buff = data + param_location;
1018 }
1019 dr_ste_copy_mask_misc4(buff, &set_param->misc4, clr);
1020 }
1021
1022 param_location += sizeof(struct mlx5dr_match_misc4);
1023
1024 if (match_criteria & DR_MATCHER_CRITERIA_MISC5) {
1025 if (mask->match_sz < param_location +
1026 sizeof(struct mlx5dr_match_misc5)) {
1027 memcpy(tail_param, data + param_location,
1028 mask->match_sz - param_location);
1029 buff = tail_param;
1030 } else {
1031 buff = data + param_location;
1032 }
1033 dr_ste_copy_mask_misc5(buff, &set_param->misc5, clr);
1034 }
1035}
1036
1037void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
1038 struct mlx5dr_ste_build *sb,
1039 struct mlx5dr_match_param *mask,
1040 bool inner, bool rx)
1041{
1042 sb->rx = rx;
1043 sb->inner = inner;
1044 ste_ctx->build_eth_l2_src_dst_init(sb, mask);
1045}
1046
1047void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_ctx *ste_ctx,
1048 struct mlx5dr_ste_build *sb,
1049 struct mlx5dr_match_param *mask,
1050 bool inner, bool rx)
1051{
1052 sb->rx = rx;
1053 sb->inner = inner;
1054 ste_ctx->build_eth_l3_ipv6_dst_init(sb, mask);
1055}
1056
1057void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_ctx *ste_ctx,
1058 struct mlx5dr_ste_build *sb,
1059 struct mlx5dr_match_param *mask,
1060 bool inner, bool rx)
1061{
1062 sb->rx = rx;
1063 sb->inner = inner;
1064 ste_ctx->build_eth_l3_ipv6_src_init(sb, mask);
1065}
1066
1067void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_ctx *ste_ctx,
1068 struct mlx5dr_ste_build *sb,
1069 struct mlx5dr_match_param *mask,
1070 bool inner, bool rx)
1071{
1072 sb->rx = rx;
1073 sb->inner = inner;
1074 ste_ctx->build_eth_l3_ipv4_5_tuple_init(sb, mask);
1075}
1076
1077void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_ctx *ste_ctx,
1078 struct mlx5dr_ste_build *sb,
1079 struct mlx5dr_match_param *mask,
1080 bool inner, bool rx)
1081{
1082 sb->rx = rx;
1083 sb->inner = inner;
1084 ste_ctx->build_eth_l2_src_init(sb, mask);
1085}
1086
1087void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_ctx *ste_ctx,
1088 struct mlx5dr_ste_build *sb,
1089 struct mlx5dr_match_param *mask,
1090 bool inner, bool rx)
1091{
1092 sb->rx = rx;
1093 sb->inner = inner;
1094 ste_ctx->build_eth_l2_dst_init(sb, mask);
1095}
1096
1097void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_ctx *ste_ctx,
1098 struct mlx5dr_ste_build *sb,
1099 struct mlx5dr_match_param *mask, bool inner, bool rx)
1100{
1101 sb->rx = rx;
1102 sb->inner = inner;
1103 ste_ctx->build_eth_l2_tnl_init(sb, mask);
1104}
1105
1106void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_ctx *ste_ctx,
1107 struct mlx5dr_ste_build *sb,
1108 struct mlx5dr_match_param *mask,
1109 bool inner, bool rx)
1110{
1111 sb->rx = rx;
1112 sb->inner = inner;
1113 ste_ctx->build_eth_l3_ipv4_misc_init(sb, mask);
1114}
1115
1116void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_ctx *ste_ctx,
1117 struct mlx5dr_ste_build *sb,
1118 struct mlx5dr_match_param *mask,
1119 bool inner, bool rx)
1120{
1121 sb->rx = rx;
1122 sb->inner = inner;
1123 ste_ctx->build_eth_ipv6_l3_l4_init(sb, mask);
1124}
1125
1126static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value,
1127 struct mlx5dr_ste_build *sb,
1128 u8 *tag)
1129{
1130 return 0;
1131}
1132
1133void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx)
1134{
1135 sb->rx = rx;
1136 sb->lu_type = MLX5DR_STE_LU_TYPE_DONT_CARE;
1137 sb->byte_mask = 0;
1138 sb->ste_build_tag_func = &dr_ste_build_empty_always_hit_tag;
1139}
1140
1141void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
1142 struct mlx5dr_ste_build *sb,
1143 struct mlx5dr_match_param *mask,
1144 bool inner, bool rx)
1145{
1146 sb->rx = rx;
1147 sb->inner = inner;
1148 ste_ctx->build_mpls_init(sb, mask);
1149}
1150
1151void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
1152 struct mlx5dr_ste_build *sb,
1153 struct mlx5dr_match_param *mask,
1154 bool inner, bool rx)
1155{
1156 sb->rx = rx;
1157 sb->inner = inner;
1158 ste_ctx->build_tnl_gre_init(sb, mask);
1159}
1160
1161void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx,
1162 struct mlx5dr_ste_build *sb,
1163 struct mlx5dr_match_param *mask,
1164 struct mlx5dr_cmd_caps *caps,
1165 bool inner, bool rx)
1166{
1167 sb->rx = rx;
1168 sb->inner = inner;
1169 sb->caps = caps;
1170 return ste_ctx->build_tnl_mpls_over_gre_init(sb, mask);
1171}
1172
1173void mlx5dr_ste_build_tnl_mpls_over_udp(struct mlx5dr_ste_ctx *ste_ctx,
1174 struct mlx5dr_ste_build *sb,
1175 struct mlx5dr_match_param *mask,
1176 struct mlx5dr_cmd_caps *caps,
1177 bool inner, bool rx)
1178{
1179 sb->rx = rx;
1180 sb->inner = inner;
1181 sb->caps = caps;
1182 return ste_ctx->build_tnl_mpls_over_udp_init(sb, mask);
1183}
1184
1185void mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
1186 struct mlx5dr_ste_build *sb,
1187 struct mlx5dr_match_param *mask,
1188 struct mlx5dr_cmd_caps *caps,
1189 bool inner, bool rx)
1190{
1191 sb->rx = rx;
1192 sb->inner = inner;
1193 sb->caps = caps;
1194 ste_ctx->build_icmp_init(sb, mask);
1195}
1196
1197void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
1198 struct mlx5dr_ste_build *sb,
1199 struct mlx5dr_match_param *mask,
1200 bool inner, bool rx)
1201{
1202 sb->rx = rx;
1203 sb->inner = inner;
1204 ste_ctx->build_general_purpose_init(sb, mask);
1205}
1206
1207void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_ctx *ste_ctx,
1208 struct mlx5dr_ste_build *sb,
1209 struct mlx5dr_match_param *mask,
1210 bool inner, bool rx)
1211{
1212 sb->rx = rx;
1213 sb->inner = inner;
1214 ste_ctx->build_eth_l4_misc_init(sb, mask);
1215}
1216
1217void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx,
1218 struct mlx5dr_ste_build *sb,
1219 struct mlx5dr_match_param *mask,
1220 bool inner, bool rx)
1221{
1222 sb->rx = rx;
1223 sb->inner = inner;
1224 ste_ctx->build_tnl_vxlan_gpe_init(sb, mask);
1225}
1226
1227void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
1228 struct mlx5dr_ste_build *sb,
1229 struct mlx5dr_match_param *mask,
1230 bool inner, bool rx)
1231{
1232 sb->rx = rx;
1233 sb->inner = inner;
1234 ste_ctx->build_tnl_geneve_init(sb, mask);
1235}
1236
1237void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx,
1238 struct mlx5dr_ste_build *sb,
1239 struct mlx5dr_match_param *mask,
1240 struct mlx5dr_cmd_caps *caps,
1241 bool inner, bool rx)
1242{
1243 sb->rx = rx;
1244 sb->caps = caps;
1245 sb->inner = inner;
1246 ste_ctx->build_tnl_geneve_tlv_opt_init(sb, mask);
1247}
1248
1249void mlx5dr_ste_build_tnl_geneve_tlv_opt_exist(struct mlx5dr_ste_ctx *ste_ctx,
1250 struct mlx5dr_ste_build *sb,
1251 struct mlx5dr_match_param *mask,
1252 struct mlx5dr_cmd_caps *caps,
1253 bool inner, bool rx)
1254{
1255 if (!ste_ctx->build_tnl_geneve_tlv_opt_exist_init)
1256 return;
1257
1258 sb->rx = rx;
1259 sb->caps = caps;
1260 sb->inner = inner;
1261 ste_ctx->build_tnl_geneve_tlv_opt_exist_init(sb, mask);
1262}
1263
1264void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx,
1265 struct mlx5dr_ste_build *sb,
1266 struct mlx5dr_match_param *mask,
1267 bool inner, bool rx)
1268{
1269 sb->rx = rx;
1270 sb->inner = inner;
1271 ste_ctx->build_tnl_gtpu_init(sb, mask);
1272}
1273
1274void mlx5dr_ste_build_tnl_gtpu_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
1275 struct mlx5dr_ste_build *sb,
1276 struct mlx5dr_match_param *mask,
1277 struct mlx5dr_cmd_caps *caps,
1278 bool inner, bool rx)
1279{
1280 sb->rx = rx;
1281 sb->caps = caps;
1282 sb->inner = inner;
1283 ste_ctx->build_tnl_gtpu_flex_parser_0_init(sb, mask);
1284}
1285
1286void mlx5dr_ste_build_tnl_gtpu_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
1287 struct mlx5dr_ste_build *sb,
1288 struct mlx5dr_match_param *mask,
1289 struct mlx5dr_cmd_caps *caps,
1290 bool inner, bool rx)
1291{
1292 sb->rx = rx;
1293 sb->caps = caps;
1294 sb->inner = inner;
1295 ste_ctx->build_tnl_gtpu_flex_parser_1_init(sb, mask);
1296}
1297
1298void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
1299 struct mlx5dr_ste_build *sb,
1300 struct mlx5dr_match_param *mask,
1301 bool inner, bool rx)
1302{
1303 sb->rx = rx;
1304 sb->inner = inner;
1305 ste_ctx->build_register_0_init(sb, mask);
1306}
1307
1308void mlx5dr_ste_build_register_1(struct mlx5dr_ste_ctx *ste_ctx,
1309 struct mlx5dr_ste_build *sb,
1310 struct mlx5dr_match_param *mask,
1311 bool inner, bool rx)
1312{
1313 sb->rx = rx;
1314 sb->inner = inner;
1315 ste_ctx->build_register_1_init(sb, mask);
1316}
1317
1318void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
1319 struct mlx5dr_ste_build *sb,
1320 struct mlx5dr_match_param *mask,
1321 struct mlx5dr_domain *dmn,
1322 bool inner, bool rx)
1323{
1324
1325 sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id;
1326
1327 sb->rx = rx;
1328 sb->dmn = dmn;
1329 sb->inner = inner;
1330 ste_ctx->build_src_gvmi_qpn_init(sb, mask);
1331}
1332
1333void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
1334 struct mlx5dr_ste_build *sb,
1335 struct mlx5dr_match_param *mask,
1336 bool inner, bool rx)
1337{
1338 sb->rx = rx;
1339 sb->inner = inner;
1340 ste_ctx->build_flex_parser_0_init(sb, mask);
1341}
1342
1343void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
1344 struct mlx5dr_ste_build *sb,
1345 struct mlx5dr_match_param *mask,
1346 bool inner, bool rx)
1347{
1348 sb->rx = rx;
1349 sb->inner = inner;
1350 ste_ctx->build_flex_parser_1_init(sb, mask);
1351}
1352
1353void mlx5dr_ste_build_tnl_header_0_1(struct mlx5dr_ste_ctx *ste_ctx,
1354 struct mlx5dr_ste_build *sb,
1355 struct mlx5dr_match_param *mask,
1356 bool inner, bool rx)
1357{
1358 sb->rx = rx;
1359 sb->inner = inner;
1360 ste_ctx->build_tnl_header_0_1_init(sb, mask);
1361}
1362
/* STE context per HW steering format, indexed by MLX5_STEERING_FORMAT_* */
static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = {
	[MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0,
	[MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1,
};
1367
1368struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
1369{
1370 if (version > MLX5_STEERING_FORMAT_CONNECTX_6DX)
1371 return NULL;
1372
1373 return mlx5dr_ste_ctx_arr[version];
1374}
1375