1
2
3
4
5
6#include <string.h>
7#include <math.h>
8#include <sys/param.h>
9#include <rte_common.h>
10#include <rte_errno.h>
11#include <rte_log.h>
12
13#include "tf_core.h"
14#include "tf_util.h"
15#include "tf_common.h"
16#include "tf_em.h"
17#include "tf_em_common.h"
18#include "tf_msg.h"
19#include "tfp.h"
20#include "tf_device.h"
21#include "tf_ext_flow_handle.h"
22#include "cfa_resource_types.h"
23
24#include "bnxt.h"
25
26
/** Number of page pointers that fit in one page of the given size. */
#define MAX_PAGE_PTRS(page_size) ((page_size) / sizeof(void *))

/** Per-direction EEM resource-manager database handles. */
void *eem_db[TF_DIR_MAX];

/** Non-zero once tf_em_ext_common_bind() has created at least one DB. */
static uint8_t init;

/** Memory type selection captured from parms at bind time. */
static enum tf_mem_type mem_type;

/* Table-scope control blocks; entries are located by a linear scan on
 * tbl_scope_id (see tbl_scope_cb_find()), not indexed by the id itself.
 */
struct tf_tbl_scope_cb tbl_scopes[TF_NUM_TBL_SCOPE];
46
47
48struct tf_tbl_scope_cb *
49tbl_scope_cb_find(uint32_t tbl_scope_id)
50{
51 int i;
52 struct tf_rm_is_allocated_parms parms = { 0 };
53 int allocated;
54
55
56 parms.rm_db = eem_db[TF_DIR_RX];
57 parms.db_index = TF_EM_TBL_TYPE_TBL_SCOPE;
58 parms.index = tbl_scope_id;
59 parms.allocated = &allocated;
60
61 i = tf_rm_is_allocated(&parms);
62
63 if (i < 0 || allocated != TF_RM_ALLOCATED_ENTRY_IN_USE)
64 return NULL;
65
66 for (i = 0; i < TF_NUM_TBL_SCOPE; i++) {
67 if (tbl_scopes[i].tbl_scope_id == tbl_scope_id)
68 return &tbl_scopes[i];
69 }
70
71 return NULL;
72}
73
74int
75tf_create_tbl_pool_external(enum tf_dir dir,
76 struct tf_tbl_scope_cb *tbl_scope_cb,
77 uint32_t num_entries,
78 uint32_t entry_sz_bytes)
79{
80 struct tfp_calloc_parms parms;
81 uint32_t i;
82 int32_t j;
83 int rc = 0;
84 struct stack *pool = &tbl_scope_cb->ext_act_pool[dir];
85
86 parms.nitems = num_entries;
87 parms.size = sizeof(uint32_t);
88 parms.alignment = 0;
89
90 if (tfp_calloc(&parms) != 0) {
91 TFP_DRV_LOG(ERR, "%s: TBL: external pool failure %s\n",
92 tf_dir_2_str(dir), strerror(ENOMEM));
93 return -ENOMEM;
94 }
95
96
97
98 rc = stack_init(num_entries, parms.mem_va, pool);
99
100 if (rc != 0) {
101 TFP_DRV_LOG(ERR, "%s: TBL: stack init failure %s\n",
102 tf_dir_2_str(dir), strerror(-rc));
103 goto cleanup;
104 }
105
106
107
108
109 tbl_scope_cb->ext_act_pool_mem[dir] = (uint32_t *)parms.mem_va;
110
111
112
113 j = (num_entries - 1) * entry_sz_bytes;
114
115 for (i = 0; i < num_entries; i++) {
116 rc = stack_push(pool, j);
117 if (rc != 0) {
118 TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
119 tf_dir_2_str(dir), strerror(-rc));
120 goto cleanup;
121 }
122
123 if (j < 0) {
124 TFP_DRV_LOG(ERR, "%d TBL: invalid offset (%d)\n",
125 dir, j);
126 goto cleanup;
127 }
128 j -= entry_sz_bytes;
129 }
130
131 if (!stack_is_full(pool)) {
132 rc = -EINVAL;
133 TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
134 tf_dir_2_str(dir), strerror(-rc));
135 goto cleanup;
136 }
137 return 0;
138cleanup:
139 tfp_free((void *)parms.mem_va);
140 return rc;
141}
142
143
144
145
146
147
148
149
150
151void
152tf_destroy_tbl_pool_external(enum tf_dir dir,
153 struct tf_tbl_scope_cb *tbl_scope_cb)
154{
155 uint32_t *ext_act_pool_mem =
156 tbl_scope_cb->ext_act_pool_mem[dir];
157
158 tfp_free(ext_act_pool_mem);
159}
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174int
175tf_tbl_ext_alloc(struct tf *tfp,
176 struct tf_tbl_alloc_parms *parms)
177{
178 int rc;
179 uint32_t index;
180 struct tf_tbl_scope_cb *tbl_scope_cb;
181 struct stack *pool;
182
183 TF_CHECK_PARMS2(tfp, parms);
184
185
186
187 tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
188
189 if (tbl_scope_cb == NULL) {
190 TFP_DRV_LOG(ERR,
191 "%s, table scope not allocated\n",
192 tf_dir_2_str(parms->dir));
193 return -EINVAL;
194 }
195 pool = &tbl_scope_cb->ext_act_pool[parms->dir];
196
197
198
199 rc = stack_pop(pool, &index);
200
201 if (rc != 0) {
202 TFP_DRV_LOG(ERR,
203 "%s, Allocation failed, type:%d\n",
204 tf_dir_2_str(parms->dir),
205 parms->type);
206 return rc;
207 }
208
209 *parms->idx = index;
210 return rc;
211}
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229int
230tf_tbl_ext_free(struct tf *tfp,
231 struct tf_tbl_free_parms *parms)
232{
233 int rc = 0;
234 uint32_t index;
235 struct tf_tbl_scope_cb *tbl_scope_cb;
236 struct stack *pool;
237
238 TF_CHECK_PARMS2(tfp, parms);
239
240
241
242 tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
243
244 if (tbl_scope_cb == NULL) {
245 TFP_DRV_LOG(ERR,
246 "%s, table scope error\n",
247 tf_dir_2_str(parms->dir));
248 return -EINVAL;
249 }
250 pool = &tbl_scope_cb->ext_act_pool[parms->dir];
251
252 index = parms->idx;
253
254 rc = stack_push(pool, index);
255
256 if (rc != 0) {
257 TFP_DRV_LOG(ERR,
258 "%s, consistency error, stack full, type:%d, idx:%d\n",
259 tf_dir_2_str(parms->dir),
260 parms->type,
261 index);
262 }
263 return rc;
264}
265
266uint32_t
267tf_em_get_key_mask(int num_entries)
268{
269 uint32_t mask = num_entries - 1;
270
271 if (num_entries & TF_EM_MAX_MASK)
272 return 0;
273
274 if (num_entries > TF_EM_MAX_ENTRY)
275 return 0;
276
277 return mask;
278}
279
280void
281tf_em_create_key_entry(struct cfa_p4_eem_entry_hdr *result,
282 uint8_t *in_key,
283 struct cfa_p4_eem_64b_entry *key_entry)
284{
285 key_entry->hdr.word1 = result->word1;
286 key_entry->hdr.pointer = result->pointer;
287 memcpy(key_entry->key, in_key, TF_HW_EM_KEY_MAX_SIZE + 4);
288}
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
/**
 * Number of page-table pages needed to reference num_pages pages.
 *
 * Ceiling division of num_pages by the pointer capacity of one page.
 */
static uint32_t
tf_em_page_tbl_pgcnt(uint32_t num_pages,
		     uint32_t page_size)
{
	uint32_t ptrs_per_page = MAX_PAGE_PTRS(page_size);

	return (num_pages + ptrs_per_page - 1) / ptrs_per_page;
}
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329static void
330tf_em_size_page_tbls(int max_lvl,
331 uint64_t num_data_pages,
332 uint32_t page_size,
333 uint32_t *page_cnt)
334{
335 if (max_lvl == TF_PT_LVL_0) {
336 page_cnt[TF_PT_LVL_0] = num_data_pages;
337 } else if (max_lvl == TF_PT_LVL_1) {
338 page_cnt[TF_PT_LVL_1] = num_data_pages;
339 page_cnt[TF_PT_LVL_0] =
340 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
341 } else if (max_lvl == TF_PT_LVL_2) {
342 page_cnt[TF_PT_LVL_2] = num_data_pages;
343 page_cnt[TF_PT_LVL_1] =
344 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_2], page_size);
345 page_cnt[TF_PT_LVL_0] =
346 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
347 } else {
348 return;
349 }
350}
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373static int
374tf_em_size_page_tbl_lvl(uint32_t page_size,
375 uint32_t entry_size,
376 uint32_t num_entries,
377 uint64_t *num_data_pages)
378{
379 uint64_t lvl_data_size = page_size;
380 int lvl = TF_PT_LVL_0;
381 uint64_t data_size;
382
383 *num_data_pages = 0;
384 data_size = (uint64_t)num_entries * entry_size;
385
386 while (lvl_data_size < data_size) {
387 lvl++;
388
389 if (lvl == TF_PT_LVL_1)
390 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
391 page_size;
392 else if (lvl == TF_PT_LVL_2)
393 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
394 MAX_PAGE_PTRS(page_size) * page_size;
395 else
396 return -ENOMEM;
397 }
398
399 *num_data_pages = roundup(data_size, page_size) / page_size;
400
401 return lvl;
402}
403
404
405
406
407
408
409
410
411
412
413
414
/**
 * Size the page-table structure for one EM table.
 *
 * Derives the number of page-table levels and per-level page counts
 * needed to back @tbl at @page_size, storing results in tbl->num_lvl,
 * tbl->num_data_pages and tbl->page_cnt[].
 *
 * Returns 0 on success (including the "unused table" case where both
 * entry_size and num_entries are zero), -EINVAL when only one of the
 * two is zero, -ENOMEM when the data cannot be addressed within the
 * supported level depth.
 */
int
tf_em_size_table(struct hcapi_cfa_em_table *tbl,
		 uint32_t page_size)
{
	uint64_t num_data_pages;
	uint32_t *page_cnt;
	int max_lvl;
	uint32_t num_entries;
	uint32_t cnt = TF_EM_MIN_ENTRIES;

	/* Unused table: nothing to size. */
	if (!tbl->entry_size && !tbl->num_entries)
		return 0;

	/* Partially configured table is invalid. */
	if (!tbl->entry_size || !tbl->num_entries)
		return -EINVAL;

	if (tbl->type == TF_RECORD_TABLE) {
		/* num_entries appears to hold a byte total here (it is
		 * divided by entry_size) — TODO confirm. Convert to an
		 * entry count, then round up to the next power of 2,
		 * with a floor of TF_EM_MIN_ENTRIES.
		 */
		num_entries = tbl->num_entries / tbl->entry_size;
		if (num_entries < TF_EM_MIN_ENTRIES) {
			num_entries = TF_EM_MIN_ENTRIES;
		} else {
			while (num_entries > cnt && cnt <= TF_EM_MAX_ENTRIES)
				cnt *= 2;
			num_entries = cnt;
		}
	} else {
		num_entries = tbl->num_entries;
	}

	/* NOTE(review): sizing uses tbl->num_entries, not the rounded
	 * num_entries computed above (that value is only used in the
	 * warning log below) — confirm this is intended.
	 */
	max_lvl = tf_em_size_page_tbl_lvl(page_size,
					  tbl->entry_size,
					  tbl->num_entries,
					  &num_data_pages);
	if (max_lvl < 0) {
		TFP_DRV_LOG(WARNING, "EEM: Failed to size page table levels\n");
		TFP_DRV_LOG(WARNING,
			    "table: %d data-sz: %016" PRIu64 " page-sz: %u\n",
			    tbl->type, (uint64_t)num_entries * tbl->entry_size,
			    page_size);
		return -ENOMEM;
	}

	tbl->num_lvl = max_lvl + 1;
	tbl->num_data_pages = num_data_pages;

	/* Compute how many pages each level of the hierarchy needs. */
	page_cnt = tbl->page_cnt;
	memset(page_cnt, 0, sizeof(tbl->page_cnt));
	tf_em_size_page_tbls(max_lvl, num_data_pages, page_size,
			     page_cnt);

	TFP_DRV_LOG(INFO, "EEM: Sized page table: %d\n", tbl->type);
	TFP_DRV_LOG(INFO,
		    "EEM: lvls: %d sz: %016" PRIu64 " pgs: %016" PRIu64 \
		    " l0: %u l1: %u l2: %u\n",
		    max_lvl + 1,
		    (uint64_t)num_data_pages * page_size,
		    num_data_pages,
		    page_cnt[TF_PT_LVL_0],
		    page_cnt[TF_PT_LVL_1],
		    page_cnt[TF_PT_LVL_2]);

	return 0;
}
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502int
503tf_em_validate_num_entries(struct tf_tbl_scope_cb *tbl_scope_cb,
504 struct tf_alloc_tbl_scope_parms *parms)
505{
506 uint32_t cnt;
507
508 if (parms->rx_mem_size_in_mb != 0) {
509 uint32_t key_b = 2 * ((parms->rx_max_key_sz_in_bits / 8) + 1);
510 uint32_t action_b = ((parms->rx_max_action_entry_sz_in_bits / 8)
511 + 1);
512 uint32_t num_entries = (parms->rx_mem_size_in_mb *
513 TF_MEGABYTE) / (key_b + action_b);
514
515 if (num_entries < TF_EM_MIN_ENTRIES) {
516 TFP_DRV_LOG(ERR, "EEM: Insufficient memory requested:"
517 "%uMB\n",
518 parms->rx_mem_size_in_mb);
519 return -EINVAL;
520 }
521
522 cnt = TF_EM_MIN_ENTRIES;
523 while (num_entries > cnt &&
524 cnt <= TF_EM_MAX_ENTRIES)
525 cnt *= 2;
526
527 if (cnt > TF_EM_MAX_ENTRIES) {
528 TFP_DRV_LOG(ERR, "EEM: Invalid number of Tx requested: "
529 "%u\n",
530 (parms->tx_num_flows_in_k * TF_KILOBYTE));
531 return -EINVAL;
532 }
533
534 parms->rx_num_flows_in_k = cnt / TF_KILOBYTE;
535 } else {
536 if ((parms->rx_num_flows_in_k * TF_KILOBYTE) <
537 TF_EM_MIN_ENTRIES ||
538 (parms->rx_num_flows_in_k * TF_KILOBYTE) >
539 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported) {
540 TFP_DRV_LOG(ERR,
541 "EEM: Invalid number of Rx flows "
542 "requested:%u max:%u\n",
543 parms->rx_num_flows_in_k * TF_KILOBYTE,
544 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported);
545 return -EINVAL;
546 }
547
548
549
550
551 cnt = TF_EM_MIN_ENTRIES;
552 while ((parms->rx_num_flows_in_k * TF_KILOBYTE) != cnt &&
553 cnt <= TF_EM_MAX_ENTRIES)
554 cnt *= 2;
555
556 if (cnt > TF_EM_MAX_ENTRIES) {
557 TFP_DRV_LOG(ERR,
558 "EEM: Invalid number of Rx requested: %u\n",
559 (parms->rx_num_flows_in_k * TF_KILOBYTE));
560 return -EINVAL;
561 }
562 }
563
564 if (parms->tx_mem_size_in_mb != 0) {
565 uint32_t key_b = 2 * (parms->tx_max_key_sz_in_bits / 8 + 1);
566 uint32_t action_b = ((parms->tx_max_action_entry_sz_in_bits / 8)
567 + 1);
568 uint32_t num_entries = (parms->tx_mem_size_in_mb *
569 (TF_KILOBYTE * TF_KILOBYTE)) /
570 (key_b + action_b);
571
572 if (num_entries < TF_EM_MIN_ENTRIES) {
573 TFP_DRV_LOG(ERR,
574 "EEM: Insufficient memory requested:%uMB\n",
575 parms->rx_mem_size_in_mb);
576 return -EINVAL;
577 }
578
579 cnt = TF_EM_MIN_ENTRIES;
580 while (num_entries > cnt &&
581 cnt <= TF_EM_MAX_ENTRIES)
582 cnt *= 2;
583
584 if (cnt > TF_EM_MAX_ENTRIES) {
585 TFP_DRV_LOG(ERR,
586 "EEM: Invalid number of Tx requested: %u\n",
587 (parms->tx_num_flows_in_k * TF_KILOBYTE));
588 return -EINVAL;
589 }
590
591 parms->tx_num_flows_in_k = cnt / TF_KILOBYTE;
592 } else {
593 if ((parms->tx_num_flows_in_k * TF_KILOBYTE) <
594 TF_EM_MIN_ENTRIES ||
595 (parms->tx_num_flows_in_k * TF_KILOBYTE) >
596 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported) {
597 TFP_DRV_LOG(ERR,
598 "EEM: Invalid number of Tx flows "
599 "requested:%u max:%u\n",
600 (parms->tx_num_flows_in_k * TF_KILOBYTE),
601 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported);
602 return -EINVAL;
603 }
604
605 cnt = TF_EM_MIN_ENTRIES;
606 while ((parms->tx_num_flows_in_k * TF_KILOBYTE) != cnt &&
607 cnt <= TF_EM_MAX_ENTRIES)
608 cnt *= 2;
609
610 if (cnt > TF_EM_MAX_ENTRIES) {
611 TFP_DRV_LOG(ERR,
612 "EEM: Invalid number of Tx requested: %u\n",
613 (parms->tx_num_flows_in_k * TF_KILOBYTE));
614 return -EINVAL;
615 }
616 }
617
618 if (parms->rx_num_flows_in_k != 0 &&
619 parms->rx_max_key_sz_in_bits / 8 == 0) {
620 TFP_DRV_LOG(ERR,
621 "EEM: Rx key size required: %u\n",
622 (parms->rx_max_key_sz_in_bits));
623 return -EINVAL;
624 }
625
626 if (parms->tx_num_flows_in_k != 0 &&
627 parms->tx_max_key_sz_in_bits / 8 == 0) {
628 TFP_DRV_LOG(ERR,
629 "EEM: Tx key size required: %u\n",
630 (parms->tx_max_key_sz_in_bits));
631 return -EINVAL;
632 }
633
634 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].num_entries =
635 parms->rx_num_flows_in_k * TF_KILOBYTE;
636 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].entry_size =
637 parms->rx_max_key_sz_in_bits / 8;
638
639 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].num_entries =
640 parms->rx_num_flows_in_k * TF_KILOBYTE;
641 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].entry_size =
642 parms->rx_max_key_sz_in_bits / 8;
643
644 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].num_entries =
645 parms->rx_num_flows_in_k * TF_KILOBYTE;
646 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].entry_size =
647 parms->rx_max_action_entry_sz_in_bits / 8;
648
649 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EFC_TABLE].num_entries = 0;
650
651
652 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].num_entries =
653 parms->tx_num_flows_in_k * TF_KILOBYTE;
654 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].entry_size =
655 parms->tx_max_key_sz_in_bits / 8;
656
657 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].num_entries =
658 parms->tx_num_flows_in_k * TF_KILOBYTE;
659 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].entry_size =
660 parms->tx_max_key_sz_in_bits / 8;
661
662 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].num_entries =
663 parms->tx_num_flows_in_k * TF_KILOBYTE;
664 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].entry_size =
665 parms->tx_max_action_entry_sz_in_bits / 8;
666
667 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EFC_TABLE].num_entries = 0;
668
669 return 0;
670}
671
672
673
674
675
676
677
678
679
680
681
/**
 * Insert an entry into the external EM (EEM) table.
 *
 * Hashes the key into two candidate buckets, writes the 64B key entry
 * into the KEY0 table (falling back to KEY1 on failure), and encodes
 * the resulting location into the caller's flow id and flow handle.
 *
 * Return: 0 on success, -EINVAL if the KEY0 table size is not a valid
 * power of 2, or the HW op error when both insert attempts fail.
 */
static int
tf_insert_eem_entry(struct tf_tbl_scope_cb *tbl_scope_cb,
		    struct tf_insert_em_entry_parms *parms)
{
	uint32_t mask;
	uint32_t key0_hash;
	uint32_t key1_hash;
	uint32_t key0_index;
	uint32_t key1_index;
	struct cfa_p4_eem_64b_entry key_entry;
	uint32_t index;
	enum hcapi_cfa_em_table_type table_type;
	uint32_t gfid;
	struct hcapi_cfa_hwop op;
	struct hcapi_cfa_key_tbl key_tbl;
	struct hcapi_cfa_key_data key_obj;
	struct hcapi_cfa_key_loc key_loc;
	uint64_t big_hash;
	int rc;

	/* Table size must be a supported power of 2 for masking to be
	 * valid; tf_em_get_key_mask() returns 0 otherwise.
	 */
	mask = tf_em_get_key_mask(tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY0_TABLE].num_entries);

	if (!mask)
		return -EINVAL;

#ifdef TF_EEM_DEBUG
	dump_raw((uint8_t *)parms->key, TF_HW_EM_KEY_MAX_SIZE + 4, "In Key");
#endif

	/* One 64-bit hash yields the two 32-bit bucket hashes: high
	 * word for KEY0, low word for KEY1.
	 */
	big_hash = hcapi_cfa_key_hash((uint64_t *)parms->key,
				      (TF_HW_EM_KEY_MAX_SIZE + 4) * 8);
	key0_hash = (uint32_t)(big_hash >> 32);
	key1_hash = (uint32_t)(big_hash & 0xFFFFFFFF);

	key0_index = key0_hash & mask;
	key1_index = key1_hash & mask;

#ifdef TF_EEM_DEBUG
	TFP_DRV_LOG(DEBUG, "Key0 hash:0x%08x\n", key0_hash);
	TFP_DRV_LOG(DEBUG, "Key1 hash:0x%08x\n", key1_hash);
#endif

	/* Build the 64B entry: record header fields plus padded key. */
	tf_em_create_key_entry((struct cfa_p4_eem_entry_hdr *)parms->em_record,
			       ((uint8_t *)parms->key),
			       &key_entry);

	/* Attempt the insert into the KEY0 table first. */
	index = key0_index;
	op.opcode = HCAPI_CFA_HWOPS_ADD;
	key_tbl.base0 =
		(uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY0_TABLE];
	key_tbl.page_size = TF_EM_PAGE_SIZE;
	key_obj.offset = index * TF_EM_KEY_RECORD_SIZE;
	key_obj.data = (uint8_t *)&key_entry;
	key_obj.size = TF_EM_KEY_RECORD_SIZE;

	rc = hcapi_cfa_key_hw_op(&op,
				 &key_tbl,
				 &key_obj,
				 &key_loc);

	if (rc == 0) {
		table_type = TF_KEY0_TABLE;
	} else {
		/* KEY0 insert failed; retry at the KEY1 bucket. Only the
		 * table base and offset change — opcode, data and size
		 * carry over from the first attempt.
		 */
		index = key1_index;

		key_tbl.base0 =
			(uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY1_TABLE];
		key_obj.offset = index * TF_EM_KEY_RECORD_SIZE;

		rc = hcapi_cfa_key_hw_op(&op,
					 &key_tbl,
					 &key_obj,
					 &key_loc);
		if (rc != 0)
			return rc;

		table_type = TF_KEY1_TABLE;
	}

	/* Encode the landing spot into the caller's flow id/handle. */
	TF_SET_GFID(gfid,
		    index,
		    table_type);
	TF_SET_FLOW_ID(parms->flow_id,
		       gfid,
		       TF_GFID_TABLE_EXTERNAL,
		       parms->dir);
	TF_SET_FIELDS_IN_FLOW_HANDLE(parms->flow_handle,
				     0,
				     0,
				     0,
				     index,
				     0,
				     table_type);

	return 0;
}
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801static int
802tf_delete_eem_entry(struct tf_tbl_scope_cb *tbl_scope_cb,
803 struct tf_delete_em_entry_parms *parms)
804{
805 enum hcapi_cfa_em_table_type hash_type;
806 uint32_t index;
807 struct hcapi_cfa_hwop op;
808 struct hcapi_cfa_key_tbl key_tbl;
809 struct hcapi_cfa_key_data key_obj;
810 struct hcapi_cfa_key_loc key_loc;
811 int rc;
812
813 TF_GET_HASH_TYPE_FROM_FLOW_HANDLE(parms->flow_handle, hash_type);
814 TF_GET_INDEX_FROM_FLOW_HANDLE(parms->flow_handle, index);
815
816 op.opcode = HCAPI_CFA_HWOPS_DEL;
817 key_tbl.base0 =
818 (uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables
819 [(hash_type == 0 ? TF_KEY0_TABLE : TF_KEY1_TABLE)];
820 key_tbl.page_size = TF_EM_PAGE_SIZE;
821 key_obj.offset = index * TF_EM_KEY_RECORD_SIZE;
822 key_obj.data = NULL;
823 key_obj.size = TF_EM_KEY_RECORD_SIZE;
824
825 rc = hcapi_cfa_key_hw_op(&op,
826 &key_tbl,
827 &key_obj,
828 &key_loc);
829
830 if (!rc)
831 return rc;
832
833 return 0;
834}
835
836
837
838
839
840
841
842int
843tf_em_insert_ext_entry(struct tf *tfp __rte_unused,
844 struct tf_insert_em_entry_parms *parms)
845{
846 struct tf_tbl_scope_cb *tbl_scope_cb;
847
848 tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
849 if (tbl_scope_cb == NULL) {
850 TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
851 return -EINVAL;
852 }
853
854 return tf_insert_eem_entry
855 (tbl_scope_cb,
856 parms);
857}
858
859
860
861
862
863
864
865int
866tf_em_delete_ext_entry(struct tf *tfp __rte_unused,
867 struct tf_delete_em_entry_parms *parms)
868{
869 struct tf_tbl_scope_cb *tbl_scope_cb;
870
871 tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
872 if (tbl_scope_cb == NULL) {
873 TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
874 return -EINVAL;
875 }
876
877 return tf_delete_eem_entry(tbl_scope_cb, parms);
878}
879
880
881int
882tf_em_ext_common_bind(struct tf *tfp,
883 struct tf_em_cfg_parms *parms)
884{
885 int rc;
886 int i;
887 struct tf_rm_create_db_parms db_cfg = { 0 };
888 uint8_t db_exists = 0;
889
890 TF_CHECK_PARMS2(tfp, parms);
891
892 if (init) {
893 TFP_DRV_LOG(ERR,
894 "EM Ext DB already initialized\n");
895 return -EINVAL;
896 }
897
898 db_cfg.type = TF_DEVICE_MODULE_TYPE_EM;
899 db_cfg.num_elements = parms->num_elements;
900 db_cfg.cfg = parms->cfg;
901
902 for (i = 0; i < TF_DIR_MAX; i++) {
903 db_cfg.dir = i;
904 db_cfg.alloc_cnt = parms->resources->em_cnt[i].cnt;
905
906
907
908
909 if (db_cfg.alloc_cnt[TF_EM_TBL_TYPE_TBL_SCOPE] == 0)
910 continue;
911
912 db_cfg.rm_db = &eem_db[i];
913 rc = tf_rm_create_db(tfp, &db_cfg);
914 if (rc) {
915 TFP_DRV_LOG(ERR,
916 "%s: EM Ext DB creation failed\n",
917 tf_dir_2_str(i));
918
919 return rc;
920 }
921 db_exists = 1;
922 }
923
924 if (db_exists)
925 init = 1;
926
927 mem_type = parms->mem_type;
928
929 return 0;
930}
931
932int
933tf_em_ext_common_unbind(struct tf *tfp)
934{
935 int rc;
936 int i;
937 struct tf_rm_free_db_parms fparms = { 0 };
938
939 TF_CHECK_PARMS1(tfp);
940
941
942 if (!init) {
943 TFP_DRV_LOG(INFO,
944 "No EM Ext DBs created\n");
945 return 0;
946 }
947
948 for (i = 0; i < TF_DIR_MAX; i++) {
949 fparms.dir = i;
950 fparms.rm_db = eem_db[i];
951 rc = tf_rm_free_db(tfp, &fparms);
952 if (rc)
953 return rc;
954
955 eem_db[i] = NULL;
956 }
957
958 init = 0;
959
960 return 0;
961}
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978int tf_tbl_ext_common_set(struct tf *tfp,
979 struct tf_tbl_set_parms *parms)
980{
981 int rc = 0;
982 struct tf_tbl_scope_cb *tbl_scope_cb;
983 uint32_t tbl_scope_id;
984 struct hcapi_cfa_hwop op;
985 struct hcapi_cfa_key_tbl key_tbl;
986 struct hcapi_cfa_key_data key_obj;
987 struct hcapi_cfa_key_loc key_loc;
988
989 TF_CHECK_PARMS2(tfp, parms);
990
991 if (parms->data == NULL) {
992 TFP_DRV_LOG(ERR,
993 "%s, invalid parms->data\n",
994 tf_dir_2_str(parms->dir));
995 return -EINVAL;
996 }
997
998 tbl_scope_id = parms->tbl_scope_id;
999
1000 if (tbl_scope_id == TF_TBL_SCOPE_INVALID) {
1001 TFP_DRV_LOG(ERR,
1002 "%s, Table scope not allocated\n",
1003 tf_dir_2_str(parms->dir));
1004 return -EINVAL;
1005 }
1006
1007
1008
1009
1010 tbl_scope_cb = tbl_scope_cb_find(tbl_scope_id);
1011
1012 if (tbl_scope_cb == NULL) {
1013 TFP_DRV_LOG(ERR,
1014 "%s, table scope error\n",
1015 tf_dir_2_str(parms->dir));
1016 return -EINVAL;
1017 }
1018
1019 op.opcode = HCAPI_CFA_HWOPS_PUT;
1020 key_tbl.base0 =
1021 (uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_RECORD_TABLE];
1022 key_tbl.page_size = TF_EM_PAGE_SIZE;
1023 key_obj.offset = parms->idx;
1024 key_obj.data = parms->data;
1025 key_obj.size = parms->data_sz_in_bytes;
1026
1027 rc = hcapi_cfa_key_hw_op(&op,
1028 &key_tbl,
1029 &key_obj,
1030 &key_loc);
1031
1032 return rc;
1033}
1034
/**
 * Allocate an external table scope.
 *
 * Thin pass-through to the device-specific tf_em_ext_alloc().
 */
int
tf_em_ext_common_alloc(struct tf *tfp,
		       struct tf_alloc_tbl_scope_parms *parms)
{
	int rc;

	rc = tf_em_ext_alloc(tfp, parms);

	return rc;
}
1041
/**
 * Free an external table scope.
 *
 * Thin pass-through to the device-specific tf_em_ext_free().
 */
int
tf_em_ext_common_free(struct tf *tfp,
		      struct tf_free_tbl_scope_parms *parms)
{
	int rc;

	rc = tf_em_ext_free(tfp, parms);

	return rc;
}
1048
1049int tf_em_ext_map_tbl_scope(struct tf *tfp,
1050 struct tf_map_tbl_scope_parms *parms)
1051{
1052 int rc = 0;
1053 struct tf_session *tfs;
1054 struct tf_tbl_scope_cb *tbl_scope_cb;
1055 struct tf_global_cfg_parms gcfg_parms = { 0 };
1056 struct tfp_calloc_parms aparms;
1057 uint32_t *data, *mask;
1058 uint32_t sz_in_bytes = 8;
1059 struct tf_dev_info *dev;
1060
1061 tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
1062
1063 if (tbl_scope_cb == NULL) {
1064 TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb tbl_scope_id(%d)\n",
1065 parms->tbl_scope_id);
1066 return -EINVAL;
1067 }
1068
1069
1070 rc = tf_session_get_session_internal(tfp, &tfs);
1071 if (rc)
1072 return rc;
1073
1074
1075 rc = tf_session_get_device(tfs, &dev);
1076 if (rc)
1077 return rc;
1078
1079 if (dev->ops->tf_dev_map_tbl_scope == NULL) {
1080 rc = -EOPNOTSUPP;
1081 TFP_DRV_LOG(ERR,
1082 "Map table scope operation not supported, rc:%s\n",
1083 strerror(-rc));
1084 return rc;
1085 }
1086
1087 aparms.nitems = 2;
1088 aparms.size = sizeof(uint32_t);
1089 aparms.alignment = 0;
1090
1091 if (tfp_calloc(&aparms) != 0) {
1092 TFP_DRV_LOG(ERR, "Map tbl scope alloc data error %s\n",
1093 strerror(ENOMEM));
1094 return -ENOMEM;
1095 }
1096 data = aparms.mem_va;
1097
1098 if (tfp_calloc(&aparms) != 0) {
1099 TFP_DRV_LOG(ERR, "Map tbl scope alloc mask error %s\n",
1100 strerror(ENOMEM));
1101 rc = -ENOMEM;
1102 goto clean;
1103 }
1104 mask = aparms.mem_va;
1105
1106 rc = dev->ops->tf_dev_map_parif(tfp, parms->parif_bitmask,
1107 tbl_scope_cb->pf,
1108 (uint8_t *)data, (uint8_t *)mask,
1109 sz_in_bytes);
1110
1111 if (rc) {
1112 TFP_DRV_LOG(ERR,
1113 "Map table scope config failure, rc:%s\n",
1114 strerror(-rc));
1115 goto cleaner;
1116 }
1117
1118 gcfg_parms.type =
1119 (enum tf_global_config_type)TF_GLOBAL_CFG_INTERNAL_PARIF_2_PF;
1120 gcfg_parms.offset = 0;
1121 gcfg_parms.config = (uint8_t *)data;
1122 gcfg_parms.config_mask = (uint8_t *)mask;
1123 gcfg_parms.config_sz_in_bytes = sizeof(uint64_t);
1124
1125
1126 rc = tf_msg_set_global_cfg(tfp, &gcfg_parms);
1127 if (rc) {
1128 TFP_DRV_LOG(ERR,
1129 "Map tbl scope, set failed, rc:%s\n",
1130 strerror(-rc));
1131 }
1132cleaner:
1133 tfp_free(mask);
1134clean:
1135 tfp_free(data);
1136
1137 return rc;
1138}
1139