#include "ice_common.h"
#include "ice_flex_pipe.h"
#include "ice_flow.h"

static const struct ice_tunnel_type_scan tnls[] = {
	{ TNL_VXLAN, "TNL_VXLAN_PF" },
	{ TNL_GENEVE, "TNL_GENEVE_PF" },
	{ TNL_LAST, "" }
};
16
17static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
18
19 {
20 ICE_SID_XLT0_SW,
21 ICE_SID_XLT_KEY_BUILDER_SW,
22 ICE_SID_XLT1_SW,
23 ICE_SID_XLT2_SW,
24 ICE_SID_PROFID_TCAM_SW,
25 ICE_SID_PROFID_REDIR_SW,
26 ICE_SID_FLD_VEC_SW,
27 ICE_SID_CDID_KEY_BUILDER_SW,
28 ICE_SID_CDID_REDIR_SW
29 },
30
31
32 {
33 ICE_SID_XLT0_ACL,
34 ICE_SID_XLT_KEY_BUILDER_ACL,
35 ICE_SID_XLT1_ACL,
36 ICE_SID_XLT2_ACL,
37 ICE_SID_PROFID_TCAM_ACL,
38 ICE_SID_PROFID_REDIR_ACL,
39 ICE_SID_FLD_VEC_ACL,
40 ICE_SID_CDID_KEY_BUILDER_ACL,
41 ICE_SID_CDID_REDIR_ACL
42 },
43
44
45 {
46 ICE_SID_XLT0_FD,
47 ICE_SID_XLT_KEY_BUILDER_FD,
48 ICE_SID_XLT1_FD,
49 ICE_SID_XLT2_FD,
50 ICE_SID_PROFID_TCAM_FD,
51 ICE_SID_PROFID_REDIR_FD,
52 ICE_SID_FLD_VEC_FD,
53 ICE_SID_CDID_KEY_BUILDER_FD,
54 ICE_SID_CDID_REDIR_FD
55 },
56
57
58 {
59 ICE_SID_XLT0_RSS,
60 ICE_SID_XLT_KEY_BUILDER_RSS,
61 ICE_SID_XLT1_RSS,
62 ICE_SID_XLT2_RSS,
63 ICE_SID_PROFID_TCAM_RSS,
64 ICE_SID_PROFID_REDIR_RSS,
65 ICE_SID_FLD_VEC_RSS,
66 ICE_SID_CDID_KEY_BUILDER_RSS,
67 ICE_SID_CDID_REDIR_RSS
68 },
69
70
71 {
72 ICE_SID_XLT0_PE,
73 ICE_SID_XLT_KEY_BUILDER_PE,
74 ICE_SID_XLT1_PE,
75 ICE_SID_XLT2_PE,
76 ICE_SID_PROFID_TCAM_PE,
77 ICE_SID_PROFID_REDIR_PE,
78 ICE_SID_FLD_VEC_PE,
79 ICE_SID_CDID_KEY_BUILDER_PE,
80 ICE_SID_CDID_REDIR_PE
81 }
82};
83
91
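/**
 * ice_sect_id - returns section ID
 * @blk: block type
 * @sect: section type
 *
 * This helper function returns the proper section ID for the given block type
 * and section type.
 */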
92static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
93{
94 return ice_sect_lkup[blk][sect];
95}
96
97
98
99
100
101
102
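/**
 * ice_pkg_val_buf - validates the package buffer
 * @buf: pointer to the package buffer
 *
 * This helper function validates a package buffer by checking that its
 * section count and data end offset are within the allowed range. Returns
 * the buffer header on success, NULL otherwise.
 */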
103static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
104{
105 struct ice_buf_hdr *hdr;
106 u16 section_count;
107 u16 data_end;
108
109 hdr = (struct ice_buf_hdr *)buf->buf;
110
111 section_count = le16_to_cpu(hdr->section_count);
112 if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
113 return NULL;
114
115 data_end = le16_to_cpu(hdr->data_end);
116 if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
117 return NULL;
118
119 return hdr;
120}
121
122
123
124
125
126
127
128static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
129{
130 struct ice_nvm_table *nvms;
131
132 nvms = (struct ice_nvm_table *)
133 (ice_seg->device_table +
134 le32_to_cpu(ice_seg->device_table_count));
135
136 return (__force struct ice_buf_table *)
137 (nvms->vers + le32_to_cpu(nvms->table_count));
138}
139
140
141
142
143
144
145
146
147
148
149
150
151
152static struct ice_buf_hdr *
153ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
154{
155 if (ice_seg) {
156 state->buf_table = ice_find_buf_table(ice_seg);
157 if (!state->buf_table)
158 return NULL;
159
160 state->buf_idx = 0;
161 return ice_pkg_val_buf(state->buf_table->buf_array);
162 }
163
164 if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count))
165 return ice_pkg_val_buf(state->buf_table->buf_array +
166 state->buf_idx);
167 else
168 return NULL;
169}
170
171
172
173
174
175
176
177
178
179static bool
180ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
181{
182 if (!ice_seg && !state->buf)
183 return false;
184
185 if (!ice_seg && state->buf)
186 if (++state->sect_idx < le16_to_cpu(state->buf->section_count))
187 return true;
188
189 state->buf = ice_pkg_enum_buf(ice_seg, state);
190 if (!state->buf)
191 return false;
192
193
194 state->sect_idx = 0;
195 return true;
196}
197
198
199
200
201
202
203
204
205
206
207
208
209
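/**
 * ice_pkg_enum_section - enumerate sections of a given type within a segment
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 * @sect_type: section type to enumerate
 *
 * This function will enumerate all the sections of a particular type in the
 * ice segment. The first call is made with the ice_seg parameter non-NULL;
 * on subsequent calls, ice_seg is set to NULL, which continues the
 * enumeration. When the function returns a NULL pointer, all sections have
 * been enumerated.
 */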
210static void *
211ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
212 u32 sect_type)
213{
214 u16 offset, size;
215
216 if (ice_seg)
217 state->type = sect_type;
218
219 if (!ice_pkg_advance_sect(ice_seg, state))
220 return NULL;
221
222
223 while (state->buf->section_entry[state->sect_idx].type !=
224 cpu_to_le32(state->type))
225 if (!ice_pkg_advance_sect(NULL, state))
226 return NULL;
227
228
229 offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
230 if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
231 return NULL;
232
233 size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size);
234 if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
235 return NULL;
236
237
238 if (offset + size > ICE_PKG_BUF_SIZE)
239 return NULL;
240
241 state->sect_type =
242 le32_to_cpu(state->buf->section_entry[state->sect_idx].type);
243
244
245 state->sect = ((u8 *)state->buf) +
246 le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
247
248 return state->sect;
249}
250
275
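/**
 * ice_pkg_enum_entry - enumerate entries within sections of a given type
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 * @sect_type: section type to enumerate
 * @offset: pointer to variable that receives the offset in the table (optional)
 * @handler: function that extracts the requested entry from a given section
 *
 * This function will enumerate all the entries in sections of a particular
 * type in the ice segment. The first call is made with the ice_seg parameter
 * non-NULL; on subsequent calls, ice_seg is set to NULL, which continues the
 * enumeration. When the function returns a NULL pointer, all entries have
 * been enumerated.
 */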
276static void *
277ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
278 u32 sect_type, u32 *offset,
279 void *(*handler)(u32 sect_type, void *section,
280 u32 index, u32 *offset))
281{
282 void *entry;
283
284 if (ice_seg) {
285 if (!handler)
286 return NULL;
287
288 if (!ice_pkg_enum_section(ice_seg, state, sect_type))
289 return NULL;
290
291 state->entry_idx = 0;
292 state->handler = handler;
293 } else {
294 state->entry_idx++;
295 }
296
297 if (!state->handler)
298 return NULL;
299
300
301 entry = state->handler(state->sect_type, state->sect, state->entry_idx,
302 offset);
303 if (!entry) {
304
305 if (!ice_pkg_enum_section(NULL, state, 0))
306 return NULL;
307
308 state->entry_idx = 0;
309 entry = state->handler(state->sect_type, state->sect,
310 state->entry_idx, offset);
311 }
312
313 return entry;
314}
315
316
317
318
319
320
321
322
323
324
325
326static void *
327ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
328{
329 struct ice_boost_tcam_section *boost;
330
331 if (!section)
332 return NULL;
333
334 if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
335 return NULL;
336
337
338 if (index > ICE_MAX_BST_TCAMS_IN_BUF)
339 return NULL;
340
341 if (offset)
342 *offset = 0;
343
344 boost = section;
345 if (index >= le16_to_cpu(boost->count))
346 return NULL;
347
348 return boost->tcam + index;
349}
350
351
352
353
354
355
356
357
358
359
360
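/**
 * ice_find_boost_entry - find a boost TCAM entry by address
 * @ice_seg: pointer to the ice segment (non-NULL)
 * @addr: boost TCAM address of the entry to search for
 * @entry: returns pointer to the entry
 *
 * Finds a particular boost TCAM entry in the package and returns a pointer
 * to that entry, or an error if it cannot be found.
 */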
361static enum ice_status
362ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
363 struct ice_boost_tcam_entry **entry)
364{
365 struct ice_boost_tcam_entry *tcam;
366 struct ice_pkg_enum state;
367
368 memset(&state, 0, sizeof(state));
369
370 if (!ice_seg)
371 return ICE_ERR_PARAM;
372
373 do {
374 tcam = ice_pkg_enum_entry(ice_seg, &state,
375 ICE_SID_RXPARSER_BOOST_TCAM, NULL,
376 ice_boost_tcam_handler);
377 if (tcam && le16_to_cpu(tcam->addr) == addr) {
378 *entry = tcam;
379 return 0;
380 }
381
382 ice_seg = NULL;
383 } while (tcam);
384
385 *entry = NULL;
386 return ICE_ERR_CFG;
387}
388
389
390
391
392
393
394
395
396
397
398
399static void *
400ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index,
401 u32 *offset)
402{
403 struct ice_label_section *labels;
404
405 if (!section)
406 return NULL;
407
408
409 if (index > ICE_MAX_LABELS_IN_BUF)
410 return NULL;
411
412 if (offset)
413 *offset = 0;
414
415 labels = section;
416 if (index >= le16_to_cpu(labels->count))
417 return NULL;
418
419 return labels->label + index;
420}
421
422
423
424
425
426
427
428
429
430
431
432
433
434static char *
435ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
436 u16 *value)
437{
438 struct ice_label *label;
439
440
441 if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
442 return NULL;
443
444 label = ice_pkg_enum_entry(ice_seg, state, type, NULL,
445 ice_label_enum_handler);
446 if (!label)
447 return NULL;
448
449 *value = le16_to_cpu(label->value);
450 return label->name;
451}
452
453
454
455
456
457
458
459
460
461
462
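/**
 * ice_init_pkg_hints - parse package labels for hardware hints
 * @hw: pointer to the HW structure
 * @ice_seg: pointer to the segment of the package scan (non-NULL)
 *
 * This function parses the RX parser TMEM labels of the package, looking for
 * the tunnel label prefixes that belong to this PF, and records the boost
 * TCAM address of each matching entry in hw->tnl. The boost entries
 * themselves are then looked up so that tunnel ports can be programmed later.
 */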
463static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
464{
465 struct ice_pkg_enum state;
466 char *label_name;
467 u16 val;
468 int i;
469
470 memset(&hw->tnl, 0, sizeof(hw->tnl));
471 memset(&state, 0, sizeof(state));
472
473 if (!ice_seg)
474 return;
475
476 label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
477 &val);
478
479 while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
480 for (i = 0; tnls[i].type != TNL_LAST; i++) {
481 size_t len = strlen(tnls[i].label_prefix);
482
483
484 if (strncmp(label_name, tnls[i].label_prefix, len))
485 continue;
486
487
488
489
490
491 if ((label_name[len] - '0') == hw->pf_id) {
492 hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
493 hw->tnl.tbl[hw->tnl.count].valid = false;
494 hw->tnl.tbl[hw->tnl.count].boost_addr = val;
495 hw->tnl.tbl[hw->tnl.count].port = 0;
496 hw->tnl.count++;
497 break;
498 }
499 }
500
501 label_name = ice_enum_labels(NULL, 0, &state, &val);
502 }
503
504
505 for (i = 0; i < hw->tnl.count; i++) {
506 ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
507 &hw->tnl.tbl[i].boost_entry);
508 if (hw->tnl.tbl[i].boost_entry) {
509 hw->tnl.tbl[i].valid = true;
510 if (hw->tnl.tbl[i].type < __TNL_TYPE_CNT)
511 hw->tnl.valid_count[hw->tnl.tbl[i].type]++;
512 }
513 }
514}
515
516
517
518#define ICE_DC_KEY 0x1
519#define ICE_DC_KEYINV 0x1
520#define ICE_NM_KEY 0x0
521#define ICE_NM_KEYINV 0x0
522#define ICE_0_KEY 0x1
523#define ICE_0_KEYINV 0x0
524#define ICE_1_KEY 0x0
525#define ICE_1_KEYINV 0x1
526
551
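/**
 * ice_gen_key_word - generate one byte of a key and its inverse
 * @val: the value
 * @valid: valid bits mask (change only the valid bits)
 * @dont_care: don't care mask
 * @nvr_mtch: never match mask
 * @key: pointer to where the resulting key byte will be stored
 * @key_inv: pointer to where the resulting key invert byte will be stored
 *
 * Generates a key and key invert byte from a value, the valid bits, and the
 * don't care / never match masks. A bit may not be marked as both don't care
 * and never match; in that case ICE_ERR_CFG is returned.
 */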
552static enum ice_status
553ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
554 u8 *key_inv)
555{
556 u8 in_key = *key, in_key_inv = *key_inv;
557 u8 i;
558
559
560 if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
561 return ICE_ERR_CFG;
562
563 *key = 0;
564 *key_inv = 0;
565
566
567 for (i = 0; i < 8; i++) {
568 *key >>= 1;
569 *key_inv >>= 1;
570
571 if (!(valid & 0x1)) {
572 *key |= (in_key & 0x1) << 7;
573 *key_inv |= (in_key_inv & 0x1) << 7;
574 } else if (dont_care & 0x1) {
575 *key |= ICE_DC_KEY << 7;
576 *key_inv |= ICE_DC_KEYINV << 7;
577 } else if (nvr_mtch & 0x1) {
578 *key |= ICE_NM_KEY << 7;
579 *key_inv |= ICE_NM_KEYINV << 7;
580 } else if (val & 0x01) {
581 *key |= ICE_1_KEY << 7;
582 *key_inv |= ICE_1_KEYINV << 7;
583 } else {
584 *key |= ICE_0_KEY << 7;
585 *key_inv |= ICE_0_KEYINV << 7;
586 }
587
588 dont_care >>= 1;
589 nvr_mtch >>= 1;
590 valid >>= 1;
591 val >>= 1;
592 in_key >>= 1;
593 in_key_inv >>= 1;
594 }
595
596 return 0;
597}
598
599
600
601
602
603
604
605
606
607
608
609static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
610{
611 u16 count = 0;
612 u16 i;
613
614
615 for (i = 0; i < size; i++) {
616
617 if (!mask[i])
618 continue;
619
620
621
622
623
624 if (count == max)
625 return false;
626
627
628 count += hweight8(mask[i]);
629 if (count > max)
630 return false;
631 }
632
633 return true;
634}
635
653
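/**
 * ice_set_key - generate a variable sized key with multiples of 16-bits
 * @key: pointer to where the key will be stored
 * @size: the size of the complete key in bytes (must be even)
 * @val: array of 8-bit values that makes up the value portion of the key
 * @upd: array to hold the update mask, one bit per key bit (NULL means all valid)
 * @dc: array to hold the don't care mask (NULL means no don't care bits)
 * @nm: array to hold the never match mask (NULL means no never match bits)
 * @off: the offset of the first byte in the key to update
 * @len: the number of bytes in the key update
 *
 * The first half of @key receives the key bytes and the second half receives
 * the key invert bytes. At most ICE_NVR_MTCH_BITS_MAX never match bits may be
 * set across the update.
 */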
654static enum ice_status
655ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
656 u16 len)
657{
658 u16 half_size;
659 u16 i;
660
661
662 if (size % 2)
663 return ICE_ERR_CFG;
664
665 half_size = size / 2;
666 if (off + len > half_size)
667 return ICE_ERR_CFG;
668
669
670
671
672
673#define ICE_NVR_MTCH_BITS_MAX 1
674 if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
675 return ICE_ERR_CFG;
676
677 for (i = 0; i < len; i++)
678 if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
679 dc ? dc[i] : 0, nm ? nm[i] : 0,
680 key + off + i, key + half_size + off + i))
681 return ICE_ERR_CFG;
682
683 return 0;
684}
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
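/**
 * ice_acquire_global_cfg_lock - acquire global config lock for package download
 * @hw: pointer to the HW structure
 * @access: access type (read or write)
 *
 * This function will request ownership of the global config lock for reading
 * or writing of the package. When attempting to obtain write access, the
 * caller must check for the following two return values:
 *
 * 0                  - the caller has acquired the global config lock and can
 *                      perform writing of the package.
 * ICE_ERR_AQ_NO_WORK - another driver has already written the package or has
 *                      found that no update was necessary; the caller can just
 *                      skip performing any update of the package.
 */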
702static enum ice_status
703ice_acquire_global_cfg_lock(struct ice_hw *hw,
704 enum ice_aq_res_access_type access)
705{
706 enum ice_status status;
707
708 status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
709 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
710
711 if (!status)
712 mutex_lock(&ice_global_cfg_lock_sw);
713 else if (status == ICE_ERR_AQ_NO_WORK)
714 ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");
715
716 return status;
717}
718
719
720
721
722
723
724
725static void ice_release_global_cfg_lock(struct ice_hw *hw)
726{
727 mutex_unlock(&ice_global_cfg_lock_sw);
728 ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
729}
730
731
732
733
734
735
736
737
738enum ice_status
739ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
740{
741 return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
742 ICE_CHANGE_LOCK_TIMEOUT);
743}
744
745
746
747
748
749
750
751void ice_release_change_lock(struct ice_hw *hw)
752{
753 ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
754}
755
756
757
758
759
760
761
762
763
764
765
766
767
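/**
 * ice_aq_download_pkg - download package (0x0C40)
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package buffer to transfer
 * @buf_size: the size of the package buffer
 * @last_buf: last buffer indicator
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cd: pointer to command details structure or NULL
 *
 * Download Package AQ command. On an ICE_ERR_AQ_ERROR status, the error
 * offset and error information from the firmware response are returned to
 * the caller.
 */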
768static enum ice_status
769ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
770 u16 buf_size, bool last_buf, u32 *error_offset,
771 u32 *error_info, struct ice_sq_cd *cd)
772{
773 struct ice_aqc_download_pkg *cmd;
774 struct ice_aq_desc desc;
775 enum ice_status status;
776
777 if (error_offset)
778 *error_offset = 0;
779 if (error_info)
780 *error_info = 0;
781
782 cmd = &desc.params.download_pkg;
783 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
784 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
785
786 if (last_buf)
787 cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
788
789 status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
790 if (status == ICE_ERR_AQ_ERROR) {
791
792 struct ice_aqc_download_pkg_resp *resp;
793
794 resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
795 if (error_offset)
796 *error_offset = le32_to_cpu(resp->error_offset);
797 if (error_info)
798 *error_info = le32_to_cpu(resp->error_info);
799 }
800
801 return status;
802}
803
804
805
806
807
808
809
810
811
812
813
814
815
816static enum ice_status
817ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
818 bool last_buf, u32 *error_offset, u32 *error_info,
819 struct ice_sq_cd *cd)
820{
821 struct ice_aqc_download_pkg *cmd;
822 struct ice_aq_desc desc;
823 enum ice_status status;
824
825 if (error_offset)
826 *error_offset = 0;
827 if (error_info)
828 *error_info = 0;
829
830 cmd = &desc.params.download_pkg;
831 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
832 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
833
834 if (last_buf)
835 cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
836
837 status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
838 if (status == ICE_ERR_AQ_ERROR) {
839
840 struct ice_aqc_download_pkg_resp *resp;
841
842 resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
843 if (error_offset)
844 *error_offset = le32_to_cpu(resp->error_offset);
845 if (error_info)
846 *error_info = le32_to_cpu(resp->error_info);
847 }
848
849 return status;
850}
851
852
853
854
855
856
857
858
859
860
861
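/**
 * ice_find_seg_in_pkg - find a matching segment type in the package header
 * @hw: pointer to the hardware structure
 * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_ICE)
 * @pkg_hdr: pointer to the package header to be searched
 *
 * This function searches a package file for a particular segment type. On
 * success it returns a pointer to the segment header, otherwise it will
 * return NULL.
 */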
862static struct ice_generic_seg_hdr *
863ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
864 struct ice_pkg_hdr *pkg_hdr)
865{
866 u32 i;
867
868 ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
869 pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
870 pkg_hdr->pkg_format_ver.update,
871 pkg_hdr->pkg_format_ver.draft);
872
873
874 for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
875 struct ice_generic_seg_hdr *seg;
876
877 seg = (struct ice_generic_seg_hdr *)
878 ((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i]));
879
880 if (le32_to_cpu(seg->seg_type) == seg_type)
881 return seg;
882 }
883
884 return NULL;
885}
886
887
888
889
890
891
892
893
894
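/**
 * ice_update_pkg - update package array entries
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Obtains the change lock and then issues an update package AQ command for
 * each buffer in the array.
 */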
895static enum ice_status
896ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
897{
898 enum ice_status status;
899 u32 offset, info, i;
900
901 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
902 if (status)
903 return status;
904
905 for (i = 0; i < count; i++) {
906 struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
907 bool last = ((i + 1) == count);
908
909 status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end),
910 last, &offset, &info, NULL);
911
912 if (status) {
913 ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
914 status, offset, info);
915 break;
916 }
917 }
918
919 ice_release_change_lock(hw);
920
921 return status;
922}
923
924
925
926
927
928
929
930
931
932
933
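/**
 * ice_dwnld_cfg_bufs - download package config buffers
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Obtains the global config lock and downloads the package configuration
 * buffers to the firmware. Metadata buffers are skipped, and the first
 * metadata buffer found indicates that the rest of the buffers are all
 * metadata buffers. The resulting admin queue status is saved in
 * hw->pkg_dwnld_status for later reporting.
 */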
934static enum ice_status
935ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
936{
937 enum ice_status status;
938 struct ice_buf_hdr *bh;
939 u32 offset, info, i;
940
941 if (!bufs || !count)
942 return ICE_ERR_PARAM;
943
944
945
946
947
948 bh = (struct ice_buf_hdr *)bufs;
949 if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
950 return 0;
951
952
953
954
955 hw->pkg_dwnld_status = ICE_AQ_RC_OK;
956
957 status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
958 if (status) {
959 if (status == ICE_ERR_AQ_NO_WORK)
960 hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
961 else
962 hw->pkg_dwnld_status = hw->adminq.sq_last_status;
963 return status;
964 }
965
966 for (i = 0; i < count; i++) {
967 bool last = ((i + 1) == count);
968
969 if (!last) {
970
971 bh = (struct ice_buf_hdr *)(bufs + i + 1);
972
973
974
975
976
977 if (le16_to_cpu(bh->section_count))
978 if (le32_to_cpu(bh->section_entry[0].type) &
979 ICE_METADATA_BUF)
980 last = true;
981 }
982
983 bh = (struct ice_buf_hdr *)(bufs + i);
984
985 status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
986 &offset, &info, NULL);
987
988
989 hw->pkg_dwnld_status = hw->adminq.sq_last_status;
990 if (status) {
991 ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
992 status, offset, info);
993
994 break;
995 }
996
997 if (last)
998 break;
999 }
1000
1001 ice_release_global_cfg_lock(hw);
1002
1003 return status;
1004}
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015static enum ice_status
1016ice_aq_get_pkg_info_list(struct ice_hw *hw,
1017 struct ice_aqc_get_pkg_info_resp *pkg_info,
1018 u16 buf_size, struct ice_sq_cd *cd)
1019{
1020 struct ice_aq_desc desc;
1021
1022 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
1023
1024 return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
1025}
1026
1027
1028
1029
1030
1031
1032
1033
1034static enum ice_status
1035ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
1036{
1037 struct ice_buf_table *ice_buf_tbl;
1038
1039 ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
1040 ice_seg->hdr.seg_format_ver.major,
1041 ice_seg->hdr.seg_format_ver.minor,
1042 ice_seg->hdr.seg_format_ver.update,
1043 ice_seg->hdr.seg_format_ver.draft);
1044
1045 ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
1046 le32_to_cpu(ice_seg->hdr.seg_type),
1047 le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
1048
1049 ice_buf_tbl = ice_find_buf_table(ice_seg);
1050
1051 ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
1052 le32_to_cpu(ice_buf_tbl->buf_count));
1053
1054 return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
1055 le32_to_cpu(ice_buf_tbl->buf_count));
1056}
1057
1058
1059
1060
1061
1062
1063
1064
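/**
 * ice_init_pkg_info - store versions of the package and its ice segment
 * @hw: pointer to the hardware structure
 * @pkg_hdr: pointer to the driver's package hdr
 *
 * Locates the ice segment in the package, then saves the package version and
 * name from the metadata section as well as the ice segment format version
 * and ID into the HW structure.
 */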
1065static enum ice_status
1066ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
1067{
1068 struct ice_generic_seg_hdr *seg_hdr;
1069
1070 if (!pkg_hdr)
1071 return ICE_ERR_PARAM;
1072
1073 seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
1074 if (seg_hdr) {
1075 struct ice_meta_sect *meta;
1076 struct ice_pkg_enum state;
1077
1078 memset(&state, 0, sizeof(state));
1079
1080
1081 meta = ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
1082 ICE_SID_METADATA);
1083 if (!meta) {
1084 ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n");
1085 return ICE_ERR_CFG;
1086 }
1087
1088 hw->pkg_ver = meta->ver;
1089 memcpy(hw->pkg_name, meta->name, sizeof(meta->name));
1090
1091 ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
1092 meta->ver.major, meta->ver.minor, meta->ver.update,
1093 meta->ver.draft, meta->name);
1094
1095 hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
1096 memcpy(hw->ice_seg_id, seg_hdr->seg_id,
1097 sizeof(hw->ice_seg_id));
1098
1099 ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
1100 seg_hdr->seg_format_ver.major,
1101 seg_hdr->seg_format_ver.minor,
1102 seg_hdr->seg_format_ver.update,
1103 seg_hdr->seg_format_ver.draft,
1104 seg_hdr->seg_id);
1105 } else {
1106 ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
1107 return ICE_ERR_CFG;
1108 }
1109
1110 return 0;
1111}
1112
1113
1114
1115
1116
1117
1118
1119static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
1120{
1121 struct ice_aqc_get_pkg_info_resp *pkg_info;
1122 enum ice_status status;
1123 u16 size;
1124 u32 i;
1125
1126 size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
1127 pkg_info = kzalloc(size, GFP_KERNEL);
1128 if (!pkg_info)
1129 return ICE_ERR_NO_MEMORY;
1130
1131 status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
1132 if (status)
1133 goto init_pkg_free_alloc;
1134
1135 for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
1136#define ICE_PKG_FLAG_COUNT 4
1137 char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
1138 u8 place = 0;
1139
1140 if (pkg_info->pkg_info[i].is_active) {
1141 flags[place++] = 'A';
1142 hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
1143 hw->active_track_id =
1144 le32_to_cpu(pkg_info->pkg_info[i].track_id);
1145 memcpy(hw->active_pkg_name,
1146 pkg_info->pkg_info[i].name,
1147 sizeof(pkg_info->pkg_info[i].name));
1148 hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
1149 }
1150 if (pkg_info->pkg_info[i].is_active_at_boot)
1151 flags[place++] = 'B';
1152 if (pkg_info->pkg_info[i].is_modified)
1153 flags[place++] = 'M';
1154 if (pkg_info->pkg_info[i].is_in_nvm)
1155 flags[place++] = 'N';
1156
1157 ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
1158 i, pkg_info->pkg_info[i].ver.major,
1159 pkg_info->pkg_info[i].ver.minor,
1160 pkg_info->pkg_info[i].ver.update,
1161 pkg_info->pkg_info[i].ver.draft,
1162 pkg_info->pkg_info[i].name, flags);
1163 }
1164
1165init_pkg_free_alloc:
1166 kfree(pkg_info);
1167
1168 return status;
1169}
1170
1171
1172
1173
1174
1175
1176
1177
1178
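/**
 * ice_verify_pkg - verify package
 * @pkg: pointer to the package buffer
 * @len: size of the package buffer
 *
 * Verifies various attributes of the package file, including length, format
 * version, and the requirement of at least one segment. The offset and size
 * of every segment must also lie within the buffer.
 */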
1179static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
1180{
1181 u32 seg_count;
1182 u32 i;
1183
1184 if (len < struct_size(pkg, seg_offset, 1))
1185 return ICE_ERR_BUF_TOO_SHORT;
1186
1187 if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
1188 pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
1189 pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
1190 pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
1191 return ICE_ERR_CFG;
1192
1193
1194 seg_count = le32_to_cpu(pkg->seg_count);
1195 if (seg_count < 1)
1196 return ICE_ERR_CFG;
1197
1198
1199 if (len < struct_size(pkg, seg_offset, seg_count))
1200 return ICE_ERR_BUF_TOO_SHORT;
1201
1202
1203 for (i = 0; i < seg_count; i++) {
1204 u32 off = le32_to_cpu(pkg->seg_offset[i]);
1205 struct ice_generic_seg_hdr *seg;
1206
1207
1208 if (len < off + sizeof(*seg))
1209 return ICE_ERR_BUF_TOO_SHORT;
1210
1211 seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
1212
1213
1214 if (len < off + le32_to_cpu(seg->seg_size))
1215 return ICE_ERR_BUF_TOO_SHORT;
1216 }
1217
1218 return 0;
1219}
1220
1221
1222
1223
1224
1225
1226
1227
1228void ice_free_seg(struct ice_hw *hw)
1229{
1230 if (hw->pkg_copy) {
1231 devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy);
1232 hw->pkg_copy = NULL;
1233 hw->pkg_size = 0;
1234 }
1235 hw->seg = NULL;
1236}
1237
1238
1239
1240
1241
1242static void ice_init_pkg_regs(struct ice_hw *hw)
1243{
1244#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
1245#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
1246#define ICE_SW_BLK_IDX 0
1247
1248
1249 wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
1250 wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
1251}
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
1263{
1264 if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
1265 pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
1266 return ICE_ERR_NOT_SUPPORTED;
1267
1268 return 0;
1269}
1270
1271
1272
1273
1274
1275
1276
1277
1278
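/**
 * ice_chk_pkg_compat - check compatibility of the OS package with the NVM
 * @hw: pointer to the hardware structure
 * @ospkg: pointer to the package hdr copied from the OS/driver
 * @seg: on success, returns the ice segment of the OS package
 *
 * Checks that the driver supports the package version, locates the ice
 * segment, and compares the segment format version against the package
 * stored in the NVM; a differing major version or a newer minor version in
 * the OS package is reported as ICE_ERR_FW_DDP_MISMATCH.
 */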
1279static enum ice_status
1280ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
1281 struct ice_seg **seg)
1282{
1283 struct ice_aqc_get_pkg_info_resp *pkg;
1284 enum ice_status status;
1285 u16 size;
1286 u32 i;
1287
1288
1289 status = ice_chk_pkg_version(&hw->pkg_ver);
1290 if (status) {
1291 ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
1292 return status;
1293 }
1294
1295
1296 *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
1297 ospkg);
1298 if (!*seg) {
1299 ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
1300 return ICE_ERR_CFG;
1301 }
1302
1303
1304 size = struct_size(pkg, pkg_info, ICE_PKG_CNT);
1305 pkg = kzalloc(size, GFP_KERNEL);
1306 if (!pkg)
1307 return ICE_ERR_NO_MEMORY;
1308
1309 status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL);
1310 if (status)
1311 goto fw_ddp_compat_free_alloc;
1312
1313 for (i = 0; i < le32_to_cpu(pkg->count); i++) {
1314
1315 if (!pkg->pkg_info[i].is_in_nvm)
1316 continue;
1317 if ((*seg)->hdr.seg_format_ver.major !=
1318 pkg->pkg_info[i].ver.major ||
1319 (*seg)->hdr.seg_format_ver.minor >
1320 pkg->pkg_info[i].ver.minor) {
1321 status = ICE_ERR_FW_DDP_MISMATCH;
1322 ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
1323 }
1324
1325 break;
1326 }
1327fw_ddp_compat_free_alloc:
1328 kfree(pkg);
1329 return status;
1330}
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343static void *
1344ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
1345{
1346 struct ice_sw_fv_section *fv_section = section;
1347
1348 if (!section || sect_type != ICE_SID_FLD_VEC_SW)
1349 return NULL;
1350 if (index >= le16_to_cpu(fv_section->count))
1351 return NULL;
1352 if (offset)
1353
1354
1355
1356
1357
1358 *offset = le16_to_cpu(fv_section->base_offset) + index;
1359 return fv_section->fv + index;
1360}
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370static enum ice_status ice_get_prof_index_max(struct ice_hw *hw)
1371{
1372 u16 prof_index = 0, j, max_prof_index = 0;
1373 struct ice_pkg_enum state;
1374 struct ice_seg *ice_seg;
1375 bool flag = false;
1376 struct ice_fv *fv;
1377 u32 offset;
1378
1379 memset(&state, 0, sizeof(state));
1380
1381 if (!hw->seg)
1382 return ICE_ERR_PARAM;
1383
1384 ice_seg = hw->seg;
1385
1386 do {
1387 fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1388 &offset, ice_sw_fv_handler);
1389 if (!fv)
1390 break;
1391 ice_seg = NULL;
1392
1393
1394
1395
1396 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
1397 if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
1398 fv->ew[j].off != ICE_FV_OFFSET_INVAL)
1399 flag = true;
1400 if (flag && prof_index > max_prof_index)
1401 max_prof_index = prof_index;
1402
1403 prof_index++;
1404 flag = false;
1405 } while (fv);
1406
1407 hw->switch_info->max_used_prof_index = max_prof_index;
1408
1409 return 0;
1410}
1411
1436
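/**
 * ice_init_pkg - initialize/download package
 * @hw: pointer to the hardware structure
 * @buf: pointer to the package buffer
 * @len: size of the package buffer
 *
 * This function initializes a package. The package contains HW tables
 * required to do packet processing in the driver's flexible pipeline. The
 * package is verified, its versions are checked against the driver and NVM,
 * and it is then downloaded to the device. If the package was already loaded
 * (ICE_ERR_AQ_NO_WORK), that is treated as success. On success, the driver's
 * package hints, registers, and block tables are initialized from the active
 * package.
 */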
1437enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
1438{
1439 struct ice_pkg_hdr *pkg;
1440 enum ice_status status;
1441 struct ice_seg *seg;
1442
1443 if (!buf || !len)
1444 return ICE_ERR_PARAM;
1445
1446 pkg = (struct ice_pkg_hdr *)buf;
1447 status = ice_verify_pkg(pkg, len);
1448 if (status) {
1449 ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
1450 status);
1451 return status;
1452 }
1453
1454
1455 status = ice_init_pkg_info(hw, pkg);
1456 if (status)
1457 return status;
1458
1459
1460
1461
1462 status = ice_chk_pkg_compat(hw, pkg, &seg);
1463 if (status)
1464 return status;
1465
1466
1467 ice_init_pkg_hints(hw, seg);
1468 status = ice_download_pkg(hw, seg);
1469 if (status == ICE_ERR_AQ_NO_WORK) {
1470 ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
1471 status = 0;
1472 }
1473
1474
1475
1476
1477 if (!status) {
1478 status = ice_get_pkg_info(hw);
1479 if (!status)
1480 status = ice_chk_pkg_version(&hw->active_pkg_ver);
1481 }
1482
1483 if (!status) {
1484 hw->seg = seg;
1485
1486
1487
1488
1489 ice_init_pkg_regs(hw);
1490 ice_fill_blk_tbls(hw);
1491 ice_get_prof_index_max(hw);
1492 } else {
1493 ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
1494 status);
1495 }
1496
1497 return status;
1498}
1499
1500
1522
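/**
 * ice_copy_and_init_pkg - initialize/download a copy of the package
 * @hw: pointer to the hardware structure
 * @buf: pointer to the package buffer
 * @len: size of the package buffer
 *
 * This function copies the package buffer and then calls ice_init_pkg() to
 * initialize the copy. A copy is kept (hw->pkg_copy) because some package
 * sections, such as the boost TCAM entries and field vectors, are referenced
 * for the lifetime of the driver; the copy is freed if initialization fails.
 */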
1523enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
1524{
1525 enum ice_status status;
1526 u8 *buf_copy;
1527
1528 if (!buf || !len)
1529 return ICE_ERR_PARAM;
1530
1531 buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);
1532
1533 status = ice_init_pkg(hw, buf_copy, len);
1534 if (status) {
1535
1536 devm_kfree(ice_hw_to_dev(hw), buf_copy);
1537 } else {
1538
1539 hw->pkg_copy = buf_copy;
1540 hw->pkg_size = len;
1541 }
1542
1543 return status;
1544}
1545
1546
1547
1548
1549
1550
1551
1552
1553static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
1554{
1555 struct ice_buf_build *bld;
1556 struct ice_buf_hdr *buf;
1557
1558 bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL);
1559 if (!bld)
1560 return NULL;
1561
1562 buf = (struct ice_buf_hdr *)bld;
1563 buf->data_end = cpu_to_le16(offsetof(struct ice_buf_hdr,
1564 section_entry));
1565 return bld;
1566}
1567
1568
1569
1570
1571
1572
1573static enum ice_prof_type
1574ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
1575{
1576 u16 i;
1577
1578 for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
1579
1580 if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
1581 fv->ew[i].off == ICE_VNI_OFFSET)
1582 return ICE_PROF_TUN_UDP;
1583
1584
1585 if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
1586 return ICE_PROF_TUN_GRE;
1587 }
1588
1589 return ICE_PROF_NON_TUN;
1590}
1591
1592
1593
1594
1595
1596
1597
1598void
1599ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
1600 unsigned long *bm)
1601{
1602 struct ice_pkg_enum state;
1603 struct ice_seg *ice_seg;
1604 struct ice_fv *fv;
1605
1606 if (req_profs == ICE_PROF_ALL) {
1607 bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES);
1608 return;
1609 }
1610
1611 memset(&state, 0, sizeof(state));
1612 bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
1613 ice_seg = hw->seg;
1614 do {
1615 enum ice_prof_type prof_type;
1616 u32 offset;
1617
1618 fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1619 &offset, ice_sw_fv_handler);
1620 ice_seg = NULL;
1621
1622 if (fv) {
1623
1624 prof_type = ice_get_sw_prof_type(hw, fv);
1625
1626 if (req_profs & prof_type)
1627 set_bit((u16)offset, bm);
1628 }
1629 } while (fv);
1630}
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
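/**
 * ice_get_sw_fv_list - get switch field vectors containing given protocols
 * @hw: pointer to the HW structure
 * @prot_ids: array of protocol IDs to search for
 * @ids_cnt: number of protocol IDs
 * @bm: bitmap of field vector profiles to consider
 * @fv_list: return list of field vectors that contain all of the protocol IDs
 *
 * Walks the switch field vector section of the package and, for every profile
 * set in @bm, adds the field vector to @fv_list if it contains every protocol
 * ID in @prot_ids.
 */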
1647enum ice_status
1648ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
1649 unsigned long *bm, struct list_head *fv_list)
1650{
1651 struct ice_sw_fv_list_entry *fvl;
1652 struct ice_sw_fv_list_entry *tmp;
1653 struct ice_pkg_enum state;
1654 struct ice_seg *ice_seg;
1655 struct ice_fv *fv;
1656 u32 offset;
1657
1658 memset(&state, 0, sizeof(state));
1659
1660 if (!ids_cnt || !hw->seg)
1661 return ICE_ERR_PARAM;
1662
1663 ice_seg = hw->seg;
1664 do {
1665 u16 i;
1666
1667 fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1668 &offset, ice_sw_fv_handler);
1669 if (!fv)
1670 break;
1671 ice_seg = NULL;
1672
1673
1674
1675
1676 if (!test_bit((u16)offset, bm))
1677 continue;
1678
1679 for (i = 0; i < ids_cnt; i++) {
1680 int j;
1681
1682
1683
1684
1685
1686
1687 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
1688 if (fv->ew[j].prot_id == prot_ids[i])
1689 break;
1690 if (j >= hw->blk[ICE_BLK_SW].es.fvw)
1691 break;
1692 if (i + 1 == ids_cnt) {
1693 fvl = devm_kzalloc(ice_hw_to_dev(hw),
1694 sizeof(*fvl), GFP_KERNEL);
1695 if (!fvl)
1696 goto err;
1697 fvl->fv_ptr = fv;
1698 fvl->profile_id = offset;
1699 list_add(&fvl->list_entry, fv_list);
1700 break;
1701 }
1702 }
1703 } while (fv);
1704 if (list_empty(fv_list))
1705 return ICE_ERR_CFG;
1706 return 0;
1707
1708err:
1709 list_for_each_entry_safe(fvl, tmp, fv_list, list_entry) {
1710 list_del(&fvl->list_entry);
1711 devm_kfree(ice_hw_to_dev(hw), fvl);
1712 }
1713
1714 return ICE_ERR_NO_MEMORY;
1715}
1716
1717
1718
1719
1720
1721void ice_init_prof_result_bm(struct ice_hw *hw)
1722{
1723 struct ice_pkg_enum state;
1724 struct ice_seg *ice_seg;
1725 struct ice_fv *fv;
1726
1727 memset(&state, 0, sizeof(state));
1728
1729 if (!hw->seg)
1730 return;
1731
1732 ice_seg = hw->seg;
1733 do {
1734 u32 off;
1735 u16 i;
1736
1737 fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1738 &off, ice_sw_fv_handler);
1739 ice_seg = NULL;
1740 if (!fv)
1741 break;
1742
1743 bitmap_zero(hw->switch_info->prof_res_bm[off],
1744 ICE_MAX_FV_WORDS);
1745
1746
1747
1748
1749
1750 for (i = 1; i < ICE_MAX_FV_WORDS; i++)
1751 if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
1752 fv->ew[i].off == ICE_FV_OFFSET_INVAL)
1753 set_bit(i, hw->switch_info->prof_res_bm[off]);
1754 } while (fv);
1755}
1756
1757
1758
1759
1760
1761
1762
1763
1764static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
1765{
1766 devm_kfree(ice_hw_to_dev(hw), bld);
1767}
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
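/**
 * ice_pkg_buf_reserve_section - reserve section table entries in a package buffer
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 * @count: the number of sections to reserve
 *
 * Reserves one or more section table entries in a package buffer. This routine
 * can be called multiple times as long as it is made before calling
 * ice_pkg_buf_alloc_section(); once a section has been allocated, the number
 * of reserved entries can no longer be increased.
 */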
1782static enum ice_status
1783ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
1784{
1785 struct ice_buf_hdr *buf;
1786 u16 section_count;
1787 u16 data_end;
1788
1789 if (!bld)
1790 return ICE_ERR_PARAM;
1791
1792 buf = (struct ice_buf_hdr *)&bld->buf;
1793
1794
1795 section_count = le16_to_cpu(buf->section_count);
1796 if (section_count > 0)
1797 return ICE_ERR_CFG;
1798
1799 if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
1800 return ICE_ERR_CFG;
1801 bld->reserved_section_table_entries += count;
1802
1803 data_end = le16_to_cpu(buf->data_end) +
1804 flex_array_size(buf, section_entry, count);
1805 buf->data_end = cpu_to_le16(data_end);
1806
1807 return 0;
1808}
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
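/**
 * ice_pkg_buf_alloc_section - allocate a section in a package buffer
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 * @type: the section type value
 * @size: the size of the section to allocate (in bytes)
 *
 * Fills in the next free section table entry and reserves @size bytes of
 * (4-byte aligned) space for the section's content. Returns a pointer to the
 * start of the section within the buffer, or NULL if there is not enough
 * space or no reserved section table entries remain.
 */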
1822static void *
1823ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
1824{
1825 struct ice_buf_hdr *buf;
1826 u16 sect_count;
1827 u16 data_end;
1828
1829 if (!bld || !type || !size)
1830 return NULL;
1831
1832 buf = (struct ice_buf_hdr *)&bld->buf;
1833
1834
1835 data_end = le16_to_cpu(buf->data_end);
1836
1837
1838 data_end = ALIGN(data_end, 4);
1839
1840 if ((data_end + size) > ICE_MAX_S_DATA_END)
1841 return NULL;
1842
1843
1844 sect_count = le16_to_cpu(buf->section_count);
1845 if (sect_count < bld->reserved_section_table_entries) {
1846 void *section_ptr = ((u8 *)buf) + data_end;
1847
1848 buf->section_entry[sect_count].offset = cpu_to_le16(data_end);
1849 buf->section_entry[sect_count].size = cpu_to_le16(size);
1850 buf->section_entry[sect_count].type = cpu_to_le32(type);
1851
1852 data_end += size;
1853 buf->data_end = cpu_to_le16(data_end);
1854
1855 buf->section_count = cpu_to_le16(sect_count + 1);
1856 return section_ptr;
1857 }
1858
1859
1860 return NULL;
1861}
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
1874{
1875 struct ice_buf_hdr *buf;
1876
1877 if (!bld)
1878 return 0;
1879
1880 buf = (struct ice_buf_hdr *)&bld->buf;
1881 return le16_to_cpu(buf->section_count);
1882}
1883
1884
1885
1886
1887
1888
1889
1890static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
1891{
1892 if (!bld)
1893 return NULL;
1894
1895 return &bld->buf;
1896}
1897
1898
1899
1900
1901
1902
1903
1904bool
1905ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port,
1906 enum ice_tunnel_type type)
1907{
1908 bool res = false;
1909 u16 i;
1910
1911 mutex_lock(&hw->tnl_lock);
1912
1913 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
1914 if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port &&
1915 (type == TNL_LAST || type == hw->tnl.tbl[i].type)) {
1916 *port = hw->tnl.tbl[i].port;
1917 res = true;
1918 break;
1919 }
1920
1921 mutex_unlock(&hw->tnl_lock);
1922
1923 return res;
1924}
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
1937 u16 idx)
1938{
1939 u16 i;
1940
1941 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
1942 if (hw->tnl.tbl[i].valid &&
1943 hw->tnl.tbl[i].type == type &&
1944 idx-- == 0)
1945 return i;
1946
1947 WARN_ON_ONCE(1);
1948 return 0;
1949}
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
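/**
 * ice_create_tunnel - create a tunnel by updating the parse graph
 * @hw: pointer to the HW structure
 * @index: device table entry
 * @type: type of tunnel
 * @port: port of tunnel to create
 *
 * Creates a tunnel by building a package buffer with copies of the boost TCAM
 * entry discovered during package load for both the Rx and Tx parsers, setting
 * the destination port key to @port, and issuing an update package command.
 */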
1962static enum ice_status
1963ice_create_tunnel(struct ice_hw *hw, u16 index,
1964 enum ice_tunnel_type type, u16 port)
1965{
1966 struct ice_boost_tcam_section *sect_rx, *sect_tx;
1967 enum ice_status status = ICE_ERR_MAX_LIMIT;
1968 struct ice_buf_build *bld;
1969
1970 mutex_lock(&hw->tnl_lock);
1971
1972 bld = ice_pkg_buf_alloc(hw);
1973 if (!bld) {
1974 status = ICE_ERR_NO_MEMORY;
1975 goto ice_create_tunnel_end;
1976 }
1977
1978
1979 if (ice_pkg_buf_reserve_section(bld, 2))
1980 goto ice_create_tunnel_err;
1981
1982 sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
1983 struct_size(sect_rx, tcam, 1));
1984 if (!sect_rx)
1985 goto ice_create_tunnel_err;
1986 sect_rx->count = cpu_to_le16(1);
1987
1988 sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
1989 struct_size(sect_tx, tcam, 1));
1990 if (!sect_tx)
1991 goto ice_create_tunnel_err;
1992 sect_tx->count = cpu_to_le16(1);
1993
1994
1995 memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
1996 sizeof(*sect_rx->tcam));
1997
1998
1999
2000
	ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
		    (u8 *)&port, NULL, NULL, NULL,
		    (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key),
		    sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));
2005
2006
2007 memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));
2008
2009 status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
2010 if (!status)
2011 hw->tnl.tbl[index].port = port;
2012
2013ice_create_tunnel_err:
2014 ice_pkg_buf_free(hw, bld);
2015
2016ice_create_tunnel_end:
2017 mutex_unlock(&hw->tnl_lock);
2018
2019 return status;
2020}
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033static enum ice_status
2034ice_destroy_tunnel(struct ice_hw *hw, u16 index, enum ice_tunnel_type type,
2035 u16 port)
2036{
2037 struct ice_boost_tcam_section *sect_rx, *sect_tx;
2038 enum ice_status status = ICE_ERR_MAX_LIMIT;
2039 struct ice_buf_build *bld;
2040
2041 mutex_lock(&hw->tnl_lock);
2042
2043 if (WARN_ON(!hw->tnl.tbl[index].valid ||
2044 hw->tnl.tbl[index].type != type ||
2045 hw->tnl.tbl[index].port != port)) {
2046 status = ICE_ERR_OUT_OF_RANGE;
2047 goto ice_destroy_tunnel_end;
2048 }
2049
2050 bld = ice_pkg_buf_alloc(hw);
2051 if (!bld) {
2052 status = ICE_ERR_NO_MEMORY;
2053 goto ice_destroy_tunnel_end;
2054 }
2055
2056
2057 if (ice_pkg_buf_reserve_section(bld, 2))
2058 goto ice_destroy_tunnel_err;
2059
2060 sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
2061 struct_size(sect_rx, tcam, 1));
2062 if (!sect_rx)
2063 goto ice_destroy_tunnel_err;
2064 sect_rx->count = cpu_to_le16(1);
2065
2066 sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
2067 struct_size(sect_tx, tcam, 1));
2068 if (!sect_tx)
2069 goto ice_destroy_tunnel_err;
2070 sect_tx->count = cpu_to_le16(1);
2071
2072
2073
2074
2075 memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
2076 sizeof(*sect_rx->tcam));
2077 memcpy(sect_tx->tcam, hw->tnl.tbl[index].boost_entry,
2078 sizeof(*sect_tx->tcam));
2079
2080 status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
2081 if (!status)
2082 hw->tnl.tbl[index].port = 0;
2083
2084ice_destroy_tunnel_err:
2085 ice_pkg_buf_free(hw, bld);
2086
2087ice_destroy_tunnel_end:
2088 mutex_unlock(&hw->tnl_lock);
2089
2090 return status;
2091}
2092
2093int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
2094 unsigned int idx, struct udp_tunnel_info *ti)
2095{
2096 struct ice_netdev_priv *np = netdev_priv(netdev);
2097 struct ice_vsi *vsi = np->vsi;
2098 struct ice_pf *pf = vsi->back;
2099 enum ice_tunnel_type tnl_type;
2100 enum ice_status status;
2101 u16 index;
2102
2103 tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
2104 index = ice_tunnel_idx_to_entry(&pf->hw, tnl_type, idx);
2105
2106 status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
2107 if (status) {
2108 netdev_err(netdev, "Error adding UDP tunnel - %s\n",
2109 ice_stat_str(status));
2110 return -EIO;
2111 }
2112
2113 udp_tunnel_nic_set_port_priv(netdev, table, idx, index);
2114 return 0;
2115}
2116
2117int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
2118 unsigned int idx, struct udp_tunnel_info *ti)
2119{
2120 struct ice_netdev_priv *np = netdev_priv(netdev);
2121 struct ice_vsi *vsi = np->vsi;
2122 struct ice_pf *pf = vsi->back;
2123 enum ice_tunnel_type tnl_type;
2124 enum ice_status status;
2125
2126 tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
2127
2128 status = ice_destroy_tunnel(&pf->hw, ti->hw_priv, tnl_type,
2129 ntohs(ti->port));
2130 if (status) {
2131 netdev_err(netdev, "Error removing UDP tunnel - %s\n",
2132 ice_stat_str(status));
2133 return -EIO;
2134 }
2135
2136 return 0;
2137}
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148enum ice_status
2149ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
2150 u8 *prot, u16 *off)
2151{
2152 struct ice_fv_word *fv_ext;
2153
2154 if (prof >= hw->blk[blk].es.count)
2155 return ICE_ERR_PARAM;
2156
2157 if (fv_idx >= hw->blk[blk].es.fvw)
2158 return ICE_ERR_PARAM;
2159
2160 fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);
2161
2162 *prot = fv_ext[fv_idx].prot_id;
2163 *off = fv_ext[fv_idx].off;
2164
2165 return 0;
2166}
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181static enum ice_status
2182ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
2183{
2184 if (ptype >= ICE_XLT1_CNT || !ptg)
2185 return ICE_ERR_PARAM;
2186
2187 *ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
2188 return 0;
2189}
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
2201{
2202 hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
2203}
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215static enum ice_status
2216ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
2217{
2218 struct ice_ptg_ptype **ch;
2219 struct ice_ptg_ptype *p;
2220
2221 if (ptype > ICE_XLT1_CNT - 1)
2222 return ICE_ERR_PARAM;
2223
2224 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
2225 return ICE_ERR_DOES_NOT_EXIST;
2226
2227
2228 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
2229 return ICE_ERR_CFG;
2230
2231
2232 p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2233 ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2234 while (p) {
2235 if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
2236 *ch = p->next_ptype;
2237 break;
2238 }
2239
2240 ch = &p->next_ptype;
2241 p = p->next_ptype;
2242 }
2243
2244 hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
2245 hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;
2246
2247 return 0;
2248}
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
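/**
 * ice_ptg_add_mv_ptype - add/move ptype to a particular packet type group
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @ptype: the ptype to add or move
 * @ptg: the packet type group to add or move the ptype to
 *
 * This function will either add or move a ptype to a particular PTG depending
 * on whether the ptype is already part of another group. Note that using a
 * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
 * default PTG.
 */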
2262static enum ice_status
2263ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
2264{
2265 enum ice_status status;
2266 u8 original_ptg;
2267
2268 if (ptype > ICE_XLT1_CNT - 1)
2269 return ICE_ERR_PARAM;
2270
2271 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
2272 return ICE_ERR_DOES_NOT_EXIST;
2273
2274 status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
2275 if (status)
2276 return status;
2277
2278
2279 if (original_ptg == ptg)
2280 return 0;
2281
2282
2283 if (original_ptg != ICE_DEFAULT_PTG)
2284 ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);
2285
2286
2287 if (ptg == ICE_DEFAULT_PTG)
2288 return 0;
2289
2290
2291 hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
2292 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2293 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
2294 &hw->blk[blk].xlt1.ptypes[ptype];
2295
2296 hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
2297 hw->blk[blk].xlt1.t[ptype] = ptg;
2298
2299 return 0;
2300}
2301
2302
2303struct ice_blk_size_details {
2304 u16 xlt1;
2305 u16 xlt2;
2306 u16 prof_tcam;
2307 u16 prof_id;
2308 u8 prof_cdid_bits;
2309 u16 prof_redir;
2310 u16 es;
2311 u16 fvw;
2312 u8 overwrite;
2313 u8 reverse;
2314};
2315
2316static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331 { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256, 0, 256, 256, 48,
2332 false, false },
2333 { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 32,
2334 false, false },
2335 { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
2336 false, true },
2337 { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
2338 true, true },
2339 { ICE_XLT1_CNT, ICE_XLT2_CNT, 64, 32, 0, 32, 32, 24,
2340 false, false },
2341};
2342
2343enum ice_sid_all {
2344 ICE_SID_XLT1_OFF = 0,
2345 ICE_SID_XLT2_OFF,
2346 ICE_SID_PR_OFF,
2347 ICE_SID_PR_REDIR_OFF,
2348 ICE_SID_ES_OFF,
2349 ICE_SID_OFF_COUNT,
2350};
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361static bool
2362ice_match_prop_lst(struct list_head *list1, struct list_head *list2)
2363{
2364 struct ice_vsig_prof *tmp1;
2365 struct ice_vsig_prof *tmp2;
2366 u16 chk_count = 0;
2367 u16 count = 0;
2368
2369
2370 list_for_each_entry(tmp1, list1, list)
2371 count++;
2372 list_for_each_entry(tmp2, list2, list)
2373 chk_count++;
2374
2375 if (!count || count != chk_count)
2376 return false;
2377
2378 tmp1 = list_first_entry(list1, struct ice_vsig_prof, list);
2379 tmp2 = list_first_entry(list2, struct ice_vsig_prof, list);
2380
2381
2382
2383
2384 while (count--) {
2385 if (tmp2->profile_cookie != tmp1->profile_cookie)
2386 return false;
2387
2388 tmp1 = list_next_entry(tmp1, list);
2389 tmp2 = list_next_entry(tmp2, list);
2390 }
2391
2392 return true;
2393}
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407static enum ice_status
2408ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
2409{
2410 if (!vsig || vsi >= ICE_MAX_VSI)
2411 return ICE_ERR_PARAM;
2412
2413
2414
2415
2416
2417 *vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
2418
2419 return 0;
2420}
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2431{
2432 u16 idx = vsig & ICE_VSIG_IDX_M;
2433
2434 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
2435 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2436 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
2437 }
2438
2439 return ICE_VSIG_VALUE(idx, hw->pf_id);
2440}
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
2451{
2452 u16 i;
2453
2454 for (i = 1; i < ICE_MAX_VSIGS; i++)
2455 if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
2456 return ice_vsig_alloc_val(hw, blk, i);
2457
2458 return ICE_DEFAULT_VSIG;
2459}
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476static enum ice_status
2477ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
2478 struct list_head *chs, u16 *vsig)
2479{
2480 struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
2481 u16 i;
2482
2483 for (i = 0; i < xlt2->count; i++)
2484 if (xlt2->vsig_tbl[i].in_use &&
2485 ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
2486 *vsig = ICE_VSIG_VALUE(i, hw->pf_id);
2487 return 0;
2488 }
2489
2490 return ICE_ERR_DOES_NOT_EXIST;
2491}
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502static enum ice_status
2503ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2504{
2505 struct ice_vsig_prof *dtmp, *del;
2506 struct ice_vsig_vsi *vsi_cur;
2507 u16 idx;
2508
2509 idx = vsig & ICE_VSIG_IDX_M;
2510 if (idx >= ICE_MAX_VSIGS)
2511 return ICE_ERR_PARAM;
2512
2513 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2514 return ICE_ERR_DOES_NOT_EXIST;
2515
2516 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
2517
2518 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2519
2520
2521
2522 if (vsi_cur) {
2523
2524 do {
2525 struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
2526
2527 vsi_cur->vsig = ICE_DEFAULT_VSIG;
2528 vsi_cur->changed = 1;
2529 vsi_cur->next_vsi = NULL;
2530 vsi_cur = tmp;
2531 } while (vsi_cur);
2532
2533
2534 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
2535 }
2536
2537
2538 list_for_each_entry_safe(del, dtmp,
2539 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
2540 list) {
2541 list_del(&del->list);
2542 devm_kfree(ice_hw_to_dev(hw), del);
2543 }
2544
2545
2546
2547
2548 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2549
2550 return 0;
2551}
2552
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563static enum ice_status
2564ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
2565{
2566 struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
2567 u16 idx;
2568
2569 idx = vsig & ICE_VSIG_IDX_M;
2570
2571 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
2572 return ICE_ERR_PARAM;
2573
2574 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2575 return ICE_ERR_DOES_NOT_EXIST;
2576
2577
2578 if (idx == ICE_DEFAULT_VSIG)
2579 return 0;
2580
2581 vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2582 if (!(*vsi_head))
2583 return ICE_ERR_CFG;
2584
2585 vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
2586 vsi_cur = (*vsi_head);
2587
2588
2589 while (vsi_cur) {
2590 if (vsi_tgt == vsi_cur) {
2591 (*vsi_head) = vsi_cur->next_vsi;
2592 break;
2593 }
2594 vsi_head = &vsi_cur->next_vsi;
2595 vsi_cur = vsi_cur->next_vsi;
2596 }
2597
2598
2599 if (!vsi_cur)
2600 return ICE_ERR_DOES_NOT_EXIST;
2601
2602 vsi_cur->vsig = ICE_DEFAULT_VSIG;
2603 vsi_cur->changed = 1;
2604 vsi_cur->next_vsi = NULL;
2605
2606 return 0;
2607}
2608
2609
2610
2611
2612
2613
2614
2615
2616
2617
2618
2619
2620
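/**
 * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @vsi: VSI to move
 * @vsig: destination VSI group
 *
 * This function will move or add the input VSI to the target VSIG. The
 * function will find the original VSIG the VSI belongs to, remove the VSI
 * from that group's member list, and add it to the destination VSIG's list.
 * Moving to the default VSIG only removes the VSI from its current group.
 */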
2621static enum ice_status
2622ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
2623{
2624 struct ice_vsig_vsi *tmp;
2625 enum ice_status status;
2626 u16 orig_vsig, idx;
2627
2628 idx = vsig & ICE_VSIG_IDX_M;
2629
2630 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
2631 return ICE_ERR_PARAM;
2632
2633
2634
2635
2636 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
2637 vsig != ICE_DEFAULT_VSIG)
2638 return ICE_ERR_DOES_NOT_EXIST;
2639
2640 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
2641 if (status)
2642 return status;
2643
2644
2645 if (orig_vsig == vsig)
2646 return 0;
2647
2648 if (orig_vsig != ICE_DEFAULT_VSIG) {
2649
2650 status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
2651 if (status)
2652 return status;
2653 }
2654
2655 if (idx == ICE_DEFAULT_VSIG)
2656 return 0;
2657
2658
2659 hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
2660 hw->blk[blk].xlt2.vsis[vsi].changed = 1;
2661
2662
2663 tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2664 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
2665 &hw->blk[blk].xlt2.vsis[vsi];
2666 hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
2667 hw->blk[blk].xlt2.t[vsi] = vsig;
2668
2669 return 0;
2670}
2671
2672
2673
2674
2675
2676
2677
2678
2679
2680static bool
2681ice_prof_has_mask_idx(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 idx,
2682 u16 mask)
2683{
2684 bool expect_no_mask = false;
2685 bool found = false;
2686 bool match = false;
2687 u16 i;
2688
2689
2690 if (mask == 0 || mask == 0xffff)
2691 expect_no_mask = true;
2692
2693
2694 for (i = hw->blk[blk].masks.first; i < hw->blk[blk].masks.first +
2695 hw->blk[blk].masks.count; i++)
2696 if (hw->blk[blk].es.mask_ena[prof] & BIT(i))
2697 if (hw->blk[blk].masks.masks[i].in_use &&
2698 hw->blk[blk].masks.masks[i].idx == idx) {
2699 found = true;
2700 if (hw->blk[blk].masks.masks[i].mask == mask)
2701 match = true;
2702 break;
2703 }
2704
2705 if (expect_no_mask) {
2706 if (found)
2707 return false;
2708 } else {
2709 if (!match)
2710 return false;
2711 }
2712
2713 return true;
2714}
2715
2716
2717
2718
2719
2720
2721
2722
2723static bool
2724ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks)
2725{
2726 u16 i;
2727
2728
2729 for (i = 0; i < hw->blk[blk].es.fvw; i++)
2730 if (!ice_prof_has_mask_idx(hw, blk, prof, i, masks[i]))
2731 return false;
2732
2733 return true;
2734}
2735
2736
2737
2738
2739
2740
2741
2742
2743
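/**
 * ice_find_prof_id_with_mask - find profile ID for a given field vector
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @fv: field vector to search for
 * @masks: masks for FV
 * @prof_id: receives the profile ID
 *
 * Searches the extraction sequence entries of the block for one whose field
 * vector and per-word masks match the input and returns its profile ID. The
 * FD block never reports a match so that each flow gets its own profile.
 */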
2744static enum ice_status
2745ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
2746 struct ice_fv_word *fv, u16 *masks, u8 *prof_id)
2747{
2748 struct ice_es *es = &hw->blk[blk].es;
2749 u8 i;
2750
2751
2752
2753
2754 if (blk == ICE_BLK_FD)
2755 return ICE_ERR_DOES_NOT_EXIST;
2756
2757 for (i = 0; i < (u8)es->count; i++) {
2758 u16 off = i * es->fvw;
2759
2760 if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
2761 continue;
2762
2763
2764 if (masks && !ice_prof_has_mask(hw, blk, i, masks))
2765 continue;
2766
2767 *prof_id = i;
2768 return 0;
2769 }
2770
2771 return ICE_ERR_DOES_NOT_EXIST;
2772}
2773
2774
2775
2776
2777
2778
2779static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
2780{
2781 switch (blk) {
2782 case ICE_BLK_FD:
2783 *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID;
2784 break;
2785 case ICE_BLK_RSS:
2786 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
2787 break;
2788 default:
2789 return false;
2790 }
2791 return true;
2792}
2793
2794
2795
2796
2797
2798
2799static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
2800{
2801 switch (blk) {
2802 case ICE_BLK_FD:
2803 *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM;
2804 break;
2805 case ICE_BLK_RSS:
2806 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
2807 break;
2808 default:
2809 return false;
2810 }
2811 return true;
2812}
2813
2814
2815
2816
2817
2818
2819
2820
2821
2822
2823
2824static enum ice_status
2825ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
2826 u16 *tcam_idx)
2827{
2828 u16 res_type;
2829
2830 if (!ice_tcam_ent_rsrc_type(blk, &res_type))
2831 return ICE_ERR_PARAM;
2832
2833 return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx);
2834}
2835
2836
2837
2838
2839
2840
2841
2842
2843
2844static enum ice_status
2845ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
2846{
2847 u16 res_type;
2848
2849 if (!ice_tcam_ent_rsrc_type(blk, &res_type))
2850 return ICE_ERR_PARAM;
2851
2852 return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
2853}
2854
2855
2856
2857
2858
2859
2860
2861
2862
2863
2864static enum ice_status
2865ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
2866{
2867 enum ice_status status;
2868 u16 res_type;
2869 u16 get_prof;
2870
2871 if (!ice_prof_id_rsrc_type(blk, &res_type))
2872 return ICE_ERR_PARAM;
2873
2874 status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
2875 if (!status)
2876 *prof_id = (u8)get_prof;
2877
2878 return status;
2879}
2880
2881
2882
2883
2884
2885
2886
2887
2888
2889static enum ice_status
2890ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
2891{
2892 u16 tmp_prof_id = (u16)prof_id;
2893 u16 res_type;
2894
2895 if (!ice_prof_id_rsrc_type(blk, &res_type))
2896 return ICE_ERR_PARAM;
2897
2898 return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
2899}
2900
2901
2902
2903
2904
2905
2906
2907static enum ice_status
2908ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
2909{
2910 if (prof_id > hw->blk[blk].es.count)
2911 return ICE_ERR_PARAM;
2912
2913 hw->blk[blk].es.ref_count[prof_id]++;
2914
2915 return 0;
2916}
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926static void
2927ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx,
2928 u16 idx, u16 mask)
2929{
2930 u32 offset;
2931 u32 val;
2932
2933 switch (blk) {
2934 case ICE_BLK_RSS:
2935 offset = GLQF_HMASK(mask_idx);
2936 val = (idx << GLQF_HMASK_MSK_INDEX_S) & GLQF_HMASK_MSK_INDEX_M;
2937 val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M;
2938 break;
2939 case ICE_BLK_FD:
2940 offset = GLQF_FDMASK(mask_idx);
2941 val = (idx << GLQF_FDMASK_MSK_INDEX_S) & GLQF_FDMASK_MSK_INDEX_M;
2942 val |= (mask << GLQF_FDMASK_MASK_S) & GLQF_FDMASK_MASK_M;
2943 break;
2944 default:
2945 ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
2946 blk);
2947 return;
2948 }
2949
2950 wr32(hw, offset, val);
2951 ice_debug(hw, ICE_DBG_PKG, "write mask, blk %d (%d): %x = %x\n",
2952 blk, idx, offset, val);
2953}
2954
2955
2956
2957
2958
2959
2960
2961
2962static void
2963ice_write_prof_mask_enable_res(struct ice_hw *hw, enum ice_block blk,
2964 u16 prof_id, u32 enable_mask)
2965{
2966 u32 offset;
2967
2968 switch (blk) {
2969 case ICE_BLK_RSS:
2970 offset = GLQF_HMASK_SEL(prof_id);
2971 break;
2972 case ICE_BLK_FD:
2973 offset = GLQF_FDMASK_SEL(prof_id);
2974 break;
2975 default:
2976 ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
2977 blk);
2978 return;
2979 }
2980
2981 wr32(hw, offset, enable_mask);
2982 ice_debug(hw, ICE_DBG_PKG, "write mask enable, blk %d (%d): %x = %x\n",
2983 blk, prof_id, offset, enable_mask);
2984}
2985
/**
 * ice_init_prof_masks - initialize this PF's profile masks for a block
 * @hw: pointer to the HW struct
 * @blk: hardware block to initialize
 */
2991static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)
2992{
2993 u16 per_pf;
2994 u16 i;
2995
2996 mutex_init(&hw->blk[blk].masks.lock);
2997
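	/* The global pool of profile mask registers is split evenly across
	 * all functions; this PF owns the slice starting at pf_id * per_pf.
	 */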
2998 per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;
2999
3000 hw->blk[blk].masks.count = per_pf;
3001 hw->blk[blk].masks.first = hw->pf_id * per_pf;
3002
3003 memset(hw->blk[blk].masks.masks, 0, sizeof(hw->blk[blk].masks.masks));
3004
3005 for (i = hw->blk[blk].masks.first;
3006 i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
3007 ice_write_prof_mask_reg(hw, blk, i, 0, 0);
3008}
3009
3010
3011
3012
3013
3014static void ice_init_all_prof_masks(struct ice_hw *hw)
3015{
3016 ice_init_prof_masks(hw, ICE_BLK_RSS);
3017 ice_init_prof_masks(hw, ICE_BLK_FD);
3018}
3019
/**
 * ice_alloc_prof_mask - allocate (or share) a profile mask register
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @idx: field vector word the mask applies to
 * @mask: the 16-bit mask value
 * @mask_idx: pointer to variable to receive the mask register index
 *
 * A register already programmed with the same index/mask pair is reused and
 * its reference count bumped; otherwise the first free register is claimed
 * and programmed.
 */
3028static enum ice_status
3029ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask,
3030 u16 *mask_idx)
3031{
3032 bool found_unused = false, found_copy = false;
3033 enum ice_status status = ICE_ERR_MAX_LIMIT;
3034 u16 unused_idx = 0, copy_idx = 0;
3035 u16 i;
3036
3037 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3038 return ICE_ERR_PARAM;
3039
3040 mutex_lock(&hw->blk[blk].masks.lock);
3041
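	/* Scan this PF's mask registers looking either for an existing entry
	 * programmed with the same index/mask pair (which can be shared) or,
	 * failing that, the first unused slot.
	 */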
3042 for (i = hw->blk[blk].masks.first;
3043 i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
3044 if (hw->blk[blk].masks.masks[i].in_use) {
3045
3046
3047
3048 if (hw->blk[blk].masks.masks[i].mask == mask &&
3049 hw->blk[blk].masks.masks[i].idx == idx) {
3050 found_copy = true;
3051 copy_idx = i;
3052 break;
3053 }
3054 } else {
3055
3056
3057
3058 if (!found_unused) {
3059 found_unused = true;
3060 unused_idx = i;
3061 }
3062 }
3063
3064 if (found_copy)
3065 i = copy_idx;
3066 else if (found_unused)
3067 i = unused_idx;
3068 else
3069 goto err_ice_alloc_prof_mask;
3070
3071
3072 if (found_unused) {
3073 hw->blk[blk].masks.masks[i].in_use = true;
3074 hw->blk[blk].masks.masks[i].mask = mask;
3075 hw->blk[blk].masks.masks[i].idx = idx;
3076 hw->blk[blk].masks.masks[i].ref = 0;
3077 ice_write_prof_mask_reg(hw, blk, i, idx, mask);
3078 }
3079
3080 hw->blk[blk].masks.masks[i].ref++;
3081 *mask_idx = i;
3082 status = 0;
3083
3084err_ice_alloc_prof_mask:
3085 mutex_unlock(&hw->blk[blk].masks.lock);
3086
3087 return status;
3088}
3089
/**
 * ice_free_prof_mask - release a reference to a profile mask register
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @mask_idx: index of the mask register to release
 *
 * The register is only cleared in hardware when its last reference is gone.
 */
3096static enum ice_status
3097ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx)
3098{
3099 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3100 return ICE_ERR_PARAM;
3101
3102 if (!(mask_idx >= hw->blk[blk].masks.first &&
3103 mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count))
3104 return ICE_ERR_DOES_NOT_EXIST;
3105
3106 mutex_lock(&hw->blk[blk].masks.lock);
3107
3108 if (!hw->blk[blk].masks.masks[mask_idx].in_use)
3109 goto exit_ice_free_prof_mask;
3110
3111 if (hw->blk[blk].masks.masks[mask_idx].ref > 1) {
3112 hw->blk[blk].masks.masks[mask_idx].ref--;
3113 goto exit_ice_free_prof_mask;
3114 }
3115
3116
3117 hw->blk[blk].masks.masks[mask_idx].in_use = false;
3118 hw->blk[blk].masks.masks[mask_idx].mask = 0;
3119 hw->blk[blk].masks.masks[mask_idx].idx = 0;
3120
3121
3122 ice_debug(hw, ICE_DBG_PKG, "Free mask, blk %d, mask %d\n", blk,
3123 mask_idx);
3124 ice_write_prof_mask_reg(hw, blk, mask_idx, 0, 0);
3125
3126exit_ice_free_prof_mask:
3127 mutex_unlock(&hw->blk[blk].masks.lock);
3128
3129 return 0;
3130}
3131
/**
 * ice_free_prof_masks - free all masks enabled for a profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @prof_id: profile ID whose masks should be released
 */
3138static enum ice_status
3139ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id)
3140{
3141 u32 mask_bm;
3142 u16 i;
3143
3144 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3145 return ICE_ERR_PARAM;
3146
3147 mask_bm = hw->blk[blk].es.mask_ena[prof_id];
3148 for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++)
3149 if (mask_bm & BIT(i))
3150 ice_free_prof_mask(hw, blk, i);
3151
3152 return 0;
3153}
3154
/**
 * ice_shutdown_prof_masks - clear this PF's profile masks for a block
 * @hw: pointer to the HW struct
 * @blk: hardware block
 */
3162static void ice_shutdown_prof_masks(struct ice_hw *hw, enum ice_block blk)
3163{
3164 u16 i;
3165
3166 mutex_lock(&hw->blk[blk].masks.lock);
3167
3168 for (i = hw->blk[blk].masks.first;
3169 i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) {
3170 ice_write_prof_mask_reg(hw, blk, i, 0, 0);
3171
3172 hw->blk[blk].masks.masks[i].in_use = false;
3173 hw->blk[blk].masks.masks[i].idx = 0;
3174 hw->blk[blk].masks.masks[i].mask = 0;
3175 }
3176
3177 mutex_unlock(&hw->blk[blk].masks.lock);
3178 mutex_destroy(&hw->blk[blk].masks.lock);
3179}
3180
3181
3182
3183
3184
3185
3186
3187static void ice_shutdown_all_prof_masks(struct ice_hw *hw)
3188{
3189 ice_shutdown_prof_masks(hw, ICE_BLK_RSS);
3190 ice_shutdown_prof_masks(hw, ICE_BLK_FD);
3191}
3192
/**
 * ice_update_prof_masking - set up masking for a profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @prof_id: profile ID to configure masking for
 * @masks: per-word masks requested for this profile's field vector
 */
3200static enum ice_status
3201ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
3202 u16 *masks)
3203{
3204 bool err = false;
3205 u32 ena_mask = 0;
3206 u16 idx;
3207 u16 i;
3208
3209
3210 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3211 return 0;
3212
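	/* Allocate one mask register for every field vector word that needs
	 * a non-trivial mask; words with 0x0000 or 0xFFFF need no masking.
	 */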
3213 for (i = 0; i < hw->blk[blk].es.fvw; i++)
3214 if (masks[i] && masks[i] != 0xFFFF) {
3215 if (!ice_alloc_prof_mask(hw, blk, i, masks[i], &idx)) {
3216 ena_mask |= BIT(idx);
3217 } else {
3218
3219 err = true;
3220 break;
3221 }
3222 }
3223
3224 if (err) {
3225
3226 for (i = 0; i < BITS_PER_BYTE * sizeof(ena_mask); i++)
3227 if (ena_mask & BIT(i))
3228 ice_free_prof_mask(hw, blk, i);
3229
3230 return ICE_ERR_OUT_OF_RANGE;
3231 }
3232
3233
3234 ice_write_prof_mask_enable_res(hw, blk, prof_id, ena_mask);
3235
3236
3237 hw->blk[blk].es.mask_ena[prof_id] = ena_mask;
3238
3239 return 0;
3240}
3241
/**
 * ice_write_es - write an extraction sequence to the shadow table
 * @hw: pointer to the HW struct
 * @blk: the block in which to write the extraction sequence
 * @prof_id: the profile ID to write
 * @fv: pointer to the extraction sequence to write - NULL to clear it
 */
3249static void
3250ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
3251 struct ice_fv_word *fv)
3252{
3253 u16 off;
3254
3255 off = prof_id * hw->blk[blk].es.fvw;
3256 if (!fv) {
3257 memset(&hw->blk[blk].es.t[off], 0,
3258 hw->blk[blk].es.fvw * sizeof(*fv));
3259 hw->blk[blk].es.written[prof_id] = false;
3260 } else {
3261 memcpy(&hw->blk[blk].es.t[off], fv,
3262 hw->blk[blk].es.fvw * sizeof(*fv));
3263 }
3264}
3265
/**
 * ice_prof_dec_ref - decrement the reference count of a profile
 * @hw: pointer to the HW struct
 * @blk: the block the profile belongs to
 * @prof_id: the profile ID for which to decrement the reference count
 *
 * When the last reference is dropped the extraction sequence is cleared,
 * the profile's masks are released, and the profile ID itself is freed.
 */
3272static enum ice_status
3273ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
3274{
3275 if (prof_id > hw->blk[blk].es.count)
3276 return ICE_ERR_PARAM;
3277
3278 if (hw->blk[blk].es.ref_count[prof_id] > 0) {
3279 if (!--hw->blk[blk].es.ref_count[prof_id]) {
3280 ice_write_es(hw, blk, prof_id, NULL);
3281 ice_free_prof_masks(hw, blk, prof_id);
3282 return ice_free_prof_id(hw, blk, prof_id);
3283 }
3284 }
3285
3286 return 0;
3287}
3288
3289
3290static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
3291
3292 { ICE_SID_XLT1_SW,
3293 ICE_SID_XLT2_SW,
3294 ICE_SID_PROFID_TCAM_SW,
3295 ICE_SID_PROFID_REDIR_SW,
3296 ICE_SID_FLD_VEC_SW
3297 },
3298
3299
3300 { ICE_SID_XLT1_ACL,
3301 ICE_SID_XLT2_ACL,
3302 ICE_SID_PROFID_TCAM_ACL,
3303 ICE_SID_PROFID_REDIR_ACL,
3304 ICE_SID_FLD_VEC_ACL
3305 },
3306
3307
3308 { ICE_SID_XLT1_FD,
3309 ICE_SID_XLT2_FD,
3310 ICE_SID_PROFID_TCAM_FD,
3311 ICE_SID_PROFID_REDIR_FD,
3312 ICE_SID_FLD_VEC_FD
3313 },
3314
3315
3316 { ICE_SID_XLT1_RSS,
3317 ICE_SID_XLT2_RSS,
3318 ICE_SID_PROFID_TCAM_RSS,
3319 ICE_SID_PROFID_REDIR_RSS,
3320 ICE_SID_FLD_VEC_RSS
3321 },
3322
3323
3324 { ICE_SID_XLT1_PE,
3325 ICE_SID_XLT2_PE,
3326 ICE_SID_PROFID_TCAM_PE,
3327 ICE_SID_PROFID_REDIR_PE,
3328 ICE_SID_FLD_VEC_PE
3329 }
3330};
3331
3332
3333
3334
3335
3336
3337static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
3338{
3339 u16 pt;
3340
3341 for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
3342 u8 ptg;
3343
3344 ptg = hw->blk[blk].xlt1.t[pt];
3345 if (ptg != ICE_DEFAULT_PTG) {
3346 ice_ptg_alloc_val(hw, blk, ptg);
3347 ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
3348 }
3349 }
3350}
3351
3352
3353
3354
3355
3356
3357static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
3358{
3359 u16 vsi;
3360
3361 for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
3362 u16 vsig;
3363
3364 vsig = hw->blk[blk].xlt2.t[vsi];
3365 if (vsig) {
3366 ice_vsig_alloc_val(hw, blk, vsig);
3367 ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
3368
3369
3370
3371 hw->blk[blk].xlt2.vsis[vsi].changed = 0;
3372 }
3373 }
3374}
3375
3376
3377
3378
3379
3380static void ice_init_sw_db(struct ice_hw *hw)
3381{
3382 u16 i;
3383
3384 for (i = 0; i < ICE_BLK_COUNT; i++) {
3385 ice_init_sw_xlt1_db(hw, (enum ice_block)i);
3386 ice_init_sw_xlt2_db(hw, (enum ice_block)i);
3387 }
3388}
3389
/**
 * ice_fill_tbl - fill a block's shadow table from the package segment
 * @hw: pointer to the HW struct
 * @block_id: the block to copy sections for
 * @sid: the section ID to copy
 *
 * Copies every section of the given type found in the downloaded package
 * into the corresponding software shadow table (XLT1, XLT2, profile TCAM,
 * profile redirection or extraction sequence).
 */
3402static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
3403{
3404 u32 dst_len, sect_len, offset = 0;
3405 struct ice_prof_redir_section *pr;
3406 struct ice_prof_id_section *pid;
3407 struct ice_xlt1_section *xlt1;
3408 struct ice_xlt2_section *xlt2;
3409 struct ice_sw_fv_section *es;
3410 struct ice_pkg_enum state;
3411 u8 *src, *dst;
3412 void *sect;
3413
	/* If the HW segment pointer is NULL the package was never loaded, so
	 * there is nothing to copy; the shadow tables keep their reset
	 * values.
	 */
3418 if (!hw->seg) {
3419 ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n");
3420 return;
3421 }
3422
3423 memset(&state, 0, sizeof(state));
3424
3425 sect = ice_pkg_enum_section(hw->seg, &state, sid);
3426
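	/* Walk every section with this section ID in the package segment and
	 * append its entries to the shadow table, clamping the copy length
	 * to the size of the destination table.
	 */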
3427 while (sect) {
3428 switch (sid) {
3429 case ICE_SID_XLT1_SW:
3430 case ICE_SID_XLT1_FD:
3431 case ICE_SID_XLT1_RSS:
3432 case ICE_SID_XLT1_ACL:
3433 case ICE_SID_XLT1_PE:
3434 xlt1 = sect;
3435 src = xlt1->value;
3436 sect_len = le16_to_cpu(xlt1->count) *
3437 sizeof(*hw->blk[block_id].xlt1.t);
3438 dst = hw->blk[block_id].xlt1.t;
3439 dst_len = hw->blk[block_id].xlt1.count *
3440 sizeof(*hw->blk[block_id].xlt1.t);
3441 break;
3442 case ICE_SID_XLT2_SW:
3443 case ICE_SID_XLT2_FD:
3444 case ICE_SID_XLT2_RSS:
3445 case ICE_SID_XLT2_ACL:
3446 case ICE_SID_XLT2_PE:
3447 xlt2 = sect;
3448 src = (__force u8 *)xlt2->value;
3449 sect_len = le16_to_cpu(xlt2->count) *
3450 sizeof(*hw->blk[block_id].xlt2.t);
3451 dst = (u8 *)hw->blk[block_id].xlt2.t;
3452 dst_len = hw->blk[block_id].xlt2.count *
3453 sizeof(*hw->blk[block_id].xlt2.t);
3454 break;
3455 case ICE_SID_PROFID_TCAM_SW:
3456 case ICE_SID_PROFID_TCAM_FD:
3457 case ICE_SID_PROFID_TCAM_RSS:
3458 case ICE_SID_PROFID_TCAM_ACL:
3459 case ICE_SID_PROFID_TCAM_PE:
3460 pid = sect;
3461 src = (u8 *)pid->entry;
3462 sect_len = le16_to_cpu(pid->count) *
3463 sizeof(*hw->blk[block_id].prof.t);
3464 dst = (u8 *)hw->blk[block_id].prof.t;
3465 dst_len = hw->blk[block_id].prof.count *
3466 sizeof(*hw->blk[block_id].prof.t);
3467 break;
3468 case ICE_SID_PROFID_REDIR_SW:
3469 case ICE_SID_PROFID_REDIR_FD:
3470 case ICE_SID_PROFID_REDIR_RSS:
3471 case ICE_SID_PROFID_REDIR_ACL:
3472 case ICE_SID_PROFID_REDIR_PE:
3473 pr = sect;
3474 src = pr->redir_value;
3475 sect_len = le16_to_cpu(pr->count) *
3476 sizeof(*hw->blk[block_id].prof_redir.t);
3477 dst = hw->blk[block_id].prof_redir.t;
3478 dst_len = hw->blk[block_id].prof_redir.count *
3479 sizeof(*hw->blk[block_id].prof_redir.t);
3480 break;
3481 case ICE_SID_FLD_VEC_SW:
3482 case ICE_SID_FLD_VEC_FD:
3483 case ICE_SID_FLD_VEC_RSS:
3484 case ICE_SID_FLD_VEC_ACL:
3485 case ICE_SID_FLD_VEC_PE:
3486 es = sect;
3487 src = (u8 *)es->fv;
3488 sect_len = (u32)(le16_to_cpu(es->count) *
3489 hw->blk[block_id].es.fvw) *
3490 sizeof(*hw->blk[block_id].es.t);
3491 dst = (u8 *)hw->blk[block_id].es.t;
3492 dst_len = (u32)(hw->blk[block_id].es.count *
3493 hw->blk[block_id].es.fvw) *
3494 sizeof(*hw->blk[block_id].es.t);
3495 break;
3496 default:
3497 return;
3498 }
3499
3500
3501
3502
3503 if (offset > dst_len)
3504 return;
3505
3506
3507
3508
3509
3510
3511 if ((offset + sect_len) > dst_len)
3512 sect_len = dst_len - offset;
3513
3514 memcpy(dst + offset, src, sect_len);
3515 offset += sect_len;
3516 sect = ice_pkg_enum_section(NULL, &state, sid);
3517 }
3518}
3519
/**
 * ice_fill_blk_tbls - fill all blocks' shadow tables from the package
 * @hw: pointer to the HW struct
 *
 * Reads the current package contents, populates the driver's software
 * shadow tables, and then initializes the software PTG and VSIG databases.
 */
3528void ice_fill_blk_tbls(struct ice_hw *hw)
3529{
3530 u8 i;
3531
3532 for (i = 0; i < ICE_BLK_COUNT; i++) {
3533 enum ice_block blk_id = (enum ice_block)i;
3534
3535 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
3536 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
3537 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
3538 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
3539 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
3540 }
3541
3542 ice_init_sw_db(hw);
3543}
3544
3545
3546
3547
3548
3549
3550static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx)
3551{
3552 struct ice_es *es = &hw->blk[blk_idx].es;
3553 struct ice_prof_map *del, *tmp;
3554
3555 mutex_lock(&es->prof_map_lock);
3556 list_for_each_entry_safe(del, tmp, &es->prof_map, list) {
3557 list_del(&del->list);
3558 devm_kfree(ice_hw_to_dev(hw), del);
3559 }
3560 INIT_LIST_HEAD(&es->prof_map);
3561 mutex_unlock(&es->prof_map_lock);
3562}
3563
3564
3565
3566
3567
3568
3569static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
3570{
3571 struct ice_flow_prof *p, *tmp;
3572
3573 mutex_lock(&hw->fl_profs_locks[blk_idx]);
3574 list_for_each_entry_safe(p, tmp, &hw->fl_profs[blk_idx], l_entry) {
3575 struct ice_flow_entry *e, *t;
3576
3577 list_for_each_entry_safe(e, t, &p->entries, l_entry)
3578 ice_flow_rem_entry(hw, (enum ice_block)blk_idx,
3579 ICE_FLOW_ENTRY_HNDL(e));
3580
3581 list_del(&p->l_entry);
3582
3583 mutex_destroy(&p->entries_lock);
3584 devm_kfree(ice_hw_to_dev(hw), p);
3585 }
3586 mutex_unlock(&hw->fl_profs_locks[blk_idx]);
3587
3588
3589
3590
3591 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
3592}
3593
3594
3595
3596
3597
3598
3599static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
3600{
3601 u16 i;
3602
3603 if (!hw->blk[blk].xlt2.vsig_tbl)
3604 return;
3605
3606 for (i = 1; i < ICE_MAX_VSIGS; i++)
3607 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use)
3608 ice_vsig_free(hw, blk, i);
3609}
3610
/**
 * ice_free_hw_tbls - free hardware table memory
 * @hw: pointer to the HW struct
 */
3615void ice_free_hw_tbls(struct ice_hw *hw)
3616{
3617 struct ice_rss_cfg *r, *rt;
3618 u8 i;
3619
3620 for (i = 0; i < ICE_BLK_COUNT; i++) {
3621 if (hw->blk[i].is_list_init) {
3622 struct ice_es *es = &hw->blk[i].es;
3623
3624 ice_free_prof_map(hw, i);
3625 mutex_destroy(&es->prof_map_lock);
3626
3627 ice_free_flow_profs(hw, i);
3628 mutex_destroy(&hw->fl_profs_locks[i]);
3629
3630 hw->blk[i].is_list_init = false;
3631 }
3632 ice_free_vsig_tbl(hw, (enum ice_block)i);
3633 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptypes);
3634 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptg_tbl);
3635 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.t);
3636 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.t);
3637 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsig_tbl);
3638 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsis);
3639 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof.t);
3640 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t);
3641 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t);
3642 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count);
3643 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
3644 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.mask_ena);
3645 }
3646
3647 list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) {
3648 list_del(&r->l_entry);
3649 devm_kfree(ice_hw_to_dev(hw), r);
3650 }
3651 mutex_destroy(&hw->rss_locks);
3652 ice_shutdown_all_prof_masks(hw);
3653 memset(hw->blk, 0, sizeof(hw->blk));
3654}
3655
3656
3657
3658
3659
3660
3661static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
3662{
3663 mutex_init(&hw->fl_profs_locks[blk_idx]);
3664 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
3665}
3666
/**
 * ice_clear_hw_tbls - clear HW tables and flow profiles
 * @hw: pointer to the HW struct
 */
3671void ice_clear_hw_tbls(struct ice_hw *hw)
3672{
3673 u8 i;
3674
3675 for (i = 0; i < ICE_BLK_COUNT; i++) {
3676 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
3677 struct ice_prof_tcam *prof = &hw->blk[i].prof;
3678 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
3679 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
3680 struct ice_es *es = &hw->blk[i].es;
3681
3682 if (hw->blk[i].is_list_init) {
3683 ice_free_prof_map(hw, i);
3684 ice_free_flow_profs(hw, i);
3685 }
3686
3687 ice_free_vsig_tbl(hw, (enum ice_block)i);
3688
3689 memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes));
3690 memset(xlt1->ptg_tbl, 0,
3691 ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl));
3692 memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t));
3693
3694 memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis));
3695 memset(xlt2->vsig_tbl, 0,
3696 xlt2->count * sizeof(*xlt2->vsig_tbl));
3697 memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t));
3698
3699 memset(prof->t, 0, prof->count * sizeof(*prof->t));
3700 memset(prof_redir->t, 0,
3701 prof_redir->count * sizeof(*prof_redir->t));
3702
3703 memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw);
3704 memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
3705 memset(es->written, 0, es->count * sizeof(*es->written));
3706 memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena));
3707 }
3708}
3709
/**
 * ice_init_hw_tbls - init hardware table memory
 * @hw: pointer to the HW struct
 */
3714enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
3715{
3716 u8 i;
3717
3718 mutex_init(&hw->rss_locks);
3719 INIT_LIST_HEAD(&hw->rss_list_head);
3720 ice_init_all_prof_masks(hw);
3721 for (i = 0; i < ICE_BLK_COUNT; i++) {
3722 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
3723 struct ice_prof_tcam *prof = &hw->blk[i].prof;
3724 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
3725 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
3726 struct ice_es *es = &hw->blk[i].es;
3727 u16 j;
3728
3729 if (hw->blk[i].is_list_init)
3730 continue;
3731
3732 ice_init_flow_profs(hw, i);
3733 mutex_init(&es->prof_map_lock);
3734 INIT_LIST_HEAD(&es->prof_map);
3735 hw->blk[i].is_list_init = true;
3736
3737 hw->blk[i].overwrite = blk_sizes[i].overwrite;
3738 es->reverse = blk_sizes[i].reverse;
3739
3740 xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
3741 xlt1->count = blk_sizes[i].xlt1;
3742
3743 xlt1->ptypes = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
3744 sizeof(*xlt1->ptypes), GFP_KERNEL);
3745
3746 if (!xlt1->ptypes)
3747 goto err;
3748
3749 xlt1->ptg_tbl = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_PTGS,
3750 sizeof(*xlt1->ptg_tbl),
3751 GFP_KERNEL);
3752
3753 if (!xlt1->ptg_tbl)
3754 goto err;
3755
3756 xlt1->t = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
3757 sizeof(*xlt1->t), GFP_KERNEL);
3758 if (!xlt1->t)
3759 goto err;
3760
3761 xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
3762 xlt2->count = blk_sizes[i].xlt2;
3763
3764 xlt2->vsis = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
3765 sizeof(*xlt2->vsis), GFP_KERNEL);
3766
3767 if (!xlt2->vsis)
3768 goto err;
3769
3770 xlt2->vsig_tbl = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
3771 sizeof(*xlt2->vsig_tbl),
3772 GFP_KERNEL);
3773 if (!xlt2->vsig_tbl)
3774 goto err;
3775
3776 for (j = 0; j < xlt2->count; j++)
3777 INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
3778
3779 xlt2->t = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
3780 sizeof(*xlt2->t), GFP_KERNEL);
3781 if (!xlt2->t)
3782 goto err;
3783
3784 prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
3785 prof->count = blk_sizes[i].prof_tcam;
3786 prof->max_prof_id = blk_sizes[i].prof_id;
3787 prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
3788 prof->t = devm_kcalloc(ice_hw_to_dev(hw), prof->count,
3789 sizeof(*prof->t), GFP_KERNEL);
3790
3791 if (!prof->t)
3792 goto err;
3793
3794 prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
3795 prof_redir->count = blk_sizes[i].prof_redir;
3796 prof_redir->t = devm_kcalloc(ice_hw_to_dev(hw),
3797 prof_redir->count,
3798 sizeof(*prof_redir->t),
3799 GFP_KERNEL);
3800
3801 if (!prof_redir->t)
3802 goto err;
3803
3804 es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
3805 es->count = blk_sizes[i].es;
3806 es->fvw = blk_sizes[i].fvw;
3807 es->t = devm_kcalloc(ice_hw_to_dev(hw),
3808 (u32)(es->count * es->fvw),
3809 sizeof(*es->t), GFP_KERNEL);
3810 if (!es->t)
3811 goto err;
3812
3813 es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count,
3814 sizeof(*es->ref_count),
3815 GFP_KERNEL);
3816 if (!es->ref_count)
3817 goto err;
3818
3819 es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count,
3820 sizeof(*es->written), GFP_KERNEL);
3821 if (!es->written)
3822 goto err;
3823
3824 es->mask_ena = devm_kcalloc(ice_hw_to_dev(hw), es->count,
3825 sizeof(*es->mask_ena), GFP_KERNEL);
3826 if (!es->mask_ena)
3827 goto err;
3828 }
3829 return 0;
3830
3831err:
3832 ice_free_hw_tbls(hw);
3833 return ICE_ERR_NO_MEMORY;
3834}
3835
/**
 * ice_prof_gen_key - generate a profile ID TCAM key
 * @hw: pointer to the HW struct
 * @blk: the block in which to write the profile ID to
 * @ptg: packet type group (PTG) portion of key
 * @vsig: VSIG portion of key
 * @cdid: CDID portion of key
 * @flags: flag portion of key
 * @vl_msk: valid mask
 * @dc_msk: don't care mask
 * @nm_msk: never match mask
 * @key: output of profile ID key
 */
3849static enum ice_status
3850ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
3851 u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
3852 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
3853 u8 key[ICE_TCAM_KEY_SZ])
3854{
3855 struct ice_prof_id_key inkey;
3856
3857 inkey.xlt1 = ptg;
3858 inkey.xlt2_cdid = cpu_to_le16(vsig);
3859 inkey.flags = cpu_to_le16(flags);
3860
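	/* Fold the CDID into the upper bits of the XLT2/VSIG field of the
	 * key; how many bits are available depends on the block's TCAM key
	 * configuration.
	 */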
3861 switch (hw->blk[blk].prof.cdid_bits) {
3862 case 0:
3863 break;
3864 case 2:
3865#define ICE_CD_2_M 0xC000U
3866#define ICE_CD_2_S 14
3867 inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_2_M);
3868 inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_2_S);
3869 break;
3870 case 4:
3871#define ICE_CD_4_M 0xF000U
3872#define ICE_CD_4_S 12
3873 inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_4_M);
3874 inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_4_S);
3875 break;
3876 case 8:
3877#define ICE_CD_8_M 0xFF00U
#define ICE_CD_8_S 8
3879 inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_8_M);
3880 inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_8_S);
3881 break;
3882 default:
3883 ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
3884 break;
3885 }
3886
3887 return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
3888 nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
3889}
3890
/**
 * ice_tcam_write_entry - write a TCAM entry to the profile ID TCAM
 * @hw: pointer to the HW struct
 * @blk: the block in which to write the profile ID to
 * @idx: the entry index to write to
 * @prof_id: profile ID
 * @ptg: packet type group (PTG) portion of key
 * @vsig: VSIG portion of key
 * @cdid: CDID portion of key
 * @flags: flag portion of key
 * @vl_msk: valid mask
 * @dc_msk: don't care mask
 * @nm_msk: never match mask
 */
3905static enum ice_status
3906ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
3907 u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
3908 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
3909 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ],
3910 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
3911{
3912 struct ice_prof_tcam_entry;
3913 enum ice_status status;
3914
3915 status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
3916 dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
3917 if (!status) {
3918 hw->blk[blk].prof.t[idx].addr = cpu_to_le16(idx);
3919 hw->blk[blk].prof.t[idx].prof_id = prof_id;
3920 }
3921
3922 return status;
3923}
3924
3925
3926
3927
3928
3929
3930
3931
3932static enum ice_status
3933ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
3934{
3935 u16 idx = vsig & ICE_VSIG_IDX_M;
3936 struct ice_vsig_vsi *ptr;
3937
3938 *refs = 0;
3939
3940 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
3941 return ICE_ERR_DOES_NOT_EXIST;
3942
3943 ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
3944 while (ptr) {
3945 (*refs)++;
3946 ptr = ptr->next_vsi;
3947 }
3948
3949 return 0;
3950}
3951
3952
3953
3954
3955
3956
3957
3958
3959static bool
3960ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
3961{
3962 u16 idx = vsig & ICE_VSIG_IDX_M;
3963 struct ice_vsig_prof *ent;
3964
3965 list_for_each_entry(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3966 list)
3967 if (ent->profile_cookie == hdl)
3968 return true;
3969
3970 ice_debug(hw, ICE_DBG_INIT, "Characteristic list for VSI group %d not found.\n",
3971 vsig);
3972 return false;
3973}
3974
3975
3976
3977
3978
3979
3980
3981
3982static enum ice_status
3983ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
3984 struct ice_buf_build *bld, struct list_head *chgs)
3985{
3986 u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
3987 struct ice_chs_chg *tmp;
3988
3989 list_for_each_entry(tmp, chgs, list_entry)
3990 if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
3991 u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
3992 struct ice_pkg_es *p;
3993 u32 id;
3994
3995 id = ice_sect_id(blk, ICE_VEC_TBL);
3996 p = ice_pkg_buf_alloc_section(bld, id,
3997 struct_size(p, es, 1) +
3998 vec_size -
3999 sizeof(p->es[0]));
4000
4001 if (!p)
4002 return ICE_ERR_MAX_LIMIT;
4003
4004 p->count = cpu_to_le16(1);
4005 p->offset = cpu_to_le16(tmp->prof_id);
4006
4007 memcpy(p->es, &hw->blk[blk].es.t[off], vec_size);
4008 }
4009
4010 return 0;
4011}
4012
4013
4014
4015
4016
4017
4018
4019
4020static enum ice_status
4021ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
4022 struct ice_buf_build *bld, struct list_head *chgs)
4023{
4024 struct ice_chs_chg *tmp;
4025
4026 list_for_each_entry(tmp, chgs, list_entry)
4027 if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) {
4028 struct ice_prof_id_section *p;
4029 u32 id;
4030
4031 id = ice_sect_id(blk, ICE_PROF_TCAM);
4032 p = ice_pkg_buf_alloc_section(bld, id,
4033 struct_size(p, entry, 1));
4034
4035 if (!p)
4036 return ICE_ERR_MAX_LIMIT;
4037
4038 p->count = cpu_to_le16(1);
4039 p->entry[0].addr = cpu_to_le16(tmp->tcam_idx);
4040 p->entry[0].prof_id = tmp->prof_id;
4041
4042 memcpy(p->entry[0].key,
4043 &hw->blk[blk].prof.t[tmp->tcam_idx].key,
4044 sizeof(hw->blk[blk].prof.t->key));
4045 }
4046
4047 return 0;
4048}
4049
4050
4051
4052
4053
4054
4055
4056static enum ice_status
4057ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
4058 struct list_head *chgs)
4059{
4060 struct ice_chs_chg *tmp;
4061
4062 list_for_each_entry(tmp, chgs, list_entry)
4063 if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
4064 struct ice_xlt1_section *p;
4065 u32 id;
4066
4067 id = ice_sect_id(blk, ICE_XLT1);
4068 p = ice_pkg_buf_alloc_section(bld, id,
4069 struct_size(p, value, 1));
4070
4071 if (!p)
4072 return ICE_ERR_MAX_LIMIT;
4073
4074 p->count = cpu_to_le16(1);
4075 p->offset = cpu_to_le16(tmp->ptype);
4076 p->value[0] = tmp->ptg;
4077 }
4078
4079 return 0;
4080}
4081
4082
4083
4084
4085
4086
4087
4088static enum ice_status
4089ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
4090 struct list_head *chgs)
4091{
4092 struct ice_chs_chg *tmp;
4093
4094 list_for_each_entry(tmp, chgs, list_entry) {
4095 struct ice_xlt2_section *p;
4096 u32 id;
4097
4098 switch (tmp->type) {
4099 case ICE_VSIG_ADD:
4100 case ICE_VSI_MOVE:
4101 case ICE_VSIG_REM:
4102 id = ice_sect_id(blk, ICE_XLT2);
4103 p = ice_pkg_buf_alloc_section(bld, id,
4104 struct_size(p, value, 1));
4105
4106 if (!p)
4107 return ICE_ERR_MAX_LIMIT;
4108
4109 p->count = cpu_to_le16(1);
4110 p->offset = cpu_to_le16(tmp->vsi);
4111 p->value[0] = cpu_to_le16(tmp->vsig);
4112 break;
4113 default:
4114 break;
4115 }
4116 }
4117
4118 return 0;
4119}
4120
/**
 * ice_upd_prof_hw - update hardware using the change list
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @chgs: the list of changes to make in hardware
 */
4127static enum ice_status
4128ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
4129 struct list_head *chgs)
4130{
4131 struct ice_buf_build *b;
4132 struct ice_chs_chg *tmp;
4133 enum ice_status status;
4134 u16 pkg_sects;
4135 u16 xlt1 = 0;
4136 u16 xlt2 = 0;
4137 u16 tcam = 0;
4138 u16 es = 0;
4139 u16 sects;
4140
4141
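	/* Count how many package sections of each type this change list
	 * needs so the update buffer can be reserved before it is built.
	 */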
4142 list_for_each_entry(tmp, chgs, list_entry) {
4143 switch (tmp->type) {
4144 case ICE_PTG_ES_ADD:
4145 if (tmp->add_ptg)
4146 xlt1++;
4147 if (tmp->add_prof)
4148 es++;
4149 break;
4150 case ICE_TCAM_ADD:
4151 tcam++;
4152 break;
4153 case ICE_VSIG_ADD:
4154 case ICE_VSI_MOVE:
4155 case ICE_VSIG_REM:
4156 xlt2++;
4157 break;
4158 default:
4159 break;
4160 }
4161 }
4162 sects = xlt1 + xlt2 + tcam + es;
4163
4164 if (!sects)
4165 return 0;
4166
4167
4168 b = ice_pkg_buf_alloc(hw);
4169 if (!b)
4170 return ICE_ERR_NO_MEMORY;
4171
4172 status = ice_pkg_buf_reserve_section(b, sects);
4173 if (status)
4174 goto error_tmp;
4175
4176
4177 if (es) {
4178 status = ice_prof_bld_es(hw, blk, b, chgs);
4179 if (status)
4180 goto error_tmp;
4181 }
4182
4183 if (tcam) {
4184 status = ice_prof_bld_tcam(hw, blk, b, chgs);
4185 if (status)
4186 goto error_tmp;
4187 }
4188
4189 if (xlt1) {
4190 status = ice_prof_bld_xlt1(blk, b, chgs);
4191 if (status)
4192 goto error_tmp;
4193 }
4194
4195 if (xlt2) {
4196 status = ice_prof_bld_xlt2(blk, b, chgs);
4197 if (status)
4198 goto error_tmp;
4199 }
4200
4201
4202
4203
4204
4205 pkg_sects = ice_pkg_buf_get_active_sections(b);
4206 if (!pkg_sects || pkg_sects != sects) {
4207 status = ICE_ERR_INVAL_SIZE;
4208 goto error_tmp;
4209 }
4210
4211
4212 status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
4213 if (status == ICE_ERR_AQ_ERROR)
4214 ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
4215
4216error_tmp:
4217 ice_pkg_buf_free(hw, b);
4218 return status;
4219}
4220
4221
4222
4223
4224
4225
4226
4227
4228
4229
4230static void ice_update_fd_mask(struct ice_hw *hw, u16 prof_id, u32 mask_sel)
4231{
4232 wr32(hw, GLQF_FDMASK_SEL(prof_id), mask_sel);
4233
4234 ice_debug(hw, ICE_DBG_INIT, "fd mask(%d): %x = %x\n", prof_id,
4235 GLQF_FDMASK_SEL(prof_id), mask_sel);
4236}
4237
4238struct ice_fd_src_dst_pair {
4239 u8 prot_id;
4240 u8 count;
4241 u16 off;
4242};
4243
4244static const struct ice_fd_src_dst_pair ice_fd_pairs[] = {
4245
4246 { ICE_PROT_IPV4_OF_OR_S, 2, 12 },
4247 { ICE_PROT_IPV4_OF_OR_S, 2, 16 },
4248
4249 { ICE_PROT_IPV4_IL, 2, 12 },
4250 { ICE_PROT_IPV4_IL, 2, 16 },
4251
4252 { ICE_PROT_IPV6_OF_OR_S, 8, 8 },
4253 { ICE_PROT_IPV6_OF_OR_S, 8, 24 },
4254
4255 { ICE_PROT_IPV6_IL, 8, 8 },
4256 { ICE_PROT_IPV6_IL, 8, 24 },
4257
4258 { ICE_PROT_TCP_IL, 1, 0 },
4259 { ICE_PROT_TCP_IL, 1, 2 },
4260
4261 { ICE_PROT_UDP_OF, 1, 0 },
4262 { ICE_PROT_UDP_OF, 1, 2 },
4263
4264 { ICE_PROT_UDP_IL_OR_S, 1, 0 },
4265 { ICE_PROT_UDP_IL_OR_S, 1, 2 },
4266
4267 { ICE_PROT_SCTP_IL, 1, 0 },
4268 { ICE_PROT_SCTP_IL, 1, 2 }
4269};
4270
4271#define ICE_FD_SRC_DST_PAIR_COUNT ARRAY_SIZE(ice_fd_pairs)
4272
/**
 * ice_update_fd_swap - set the swap registers for an FD extraction sequence
 * @hw: pointer to the HW struct
 * @prof_id: profile ID
 * @es: extraction sequence (length of array is determined by the block)
 */
4279static enum ice_status
4280ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
4281{
4282 DECLARE_BITMAP(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
4283 u8 pair_start[ICE_FD_SRC_DST_PAIR_COUNT] = { 0 };
4284#define ICE_FD_FV_NOT_FOUND (-2)
4285 s8 first_free = ICE_FD_FV_NOT_FOUND;
4286 u8 used[ICE_MAX_FV_WORDS] = { 0 };
4287 s8 orig_free, si;
4288 u32 mask_sel = 0;
4289 u8 i, j, k;
4290
4291 bitmap_zero(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
4292
	/* The Flow Director field vector is filled from the highest index
	 * down towards index zero, so any free words sit at the low end.
	 * First note where that free space starts and which source and
	 * destination pairs are already present.
	 */
4300 for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) {
4301
4302
4303
4304 if (first_free == ICE_FD_FV_NOT_FOUND && es[i].prot_id !=
4305 ICE_PROT_INVALID)
4306 first_free = i - 1;
4307
4308 for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
4309 if (es[i].prot_id == ice_fd_pairs[j].prot_id &&
4310 es[i].off == ice_fd_pairs[j].off) {
4311 set_bit(j, pair_list);
4312 pair_start[j] = i;
4313 }
4314 }
4315
4316 orig_free = first_free;
4317
4318
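	/* If only one member of a source/destination pair appears in the
	 * extraction sequence, synthesize the missing member in the free
	 * words just below the lowest programmed word so that source and
	 * destination fields can be swapped symmetrically.
	 */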
4319 for (i = 0; i < ICE_FD_SRC_DST_PAIR_COUNT; i += 2) {
4320 u8 bit1 = test_bit(i + 1, pair_list);
4321 u8 bit0 = test_bit(i, pair_list);
4322
4323 if (bit0 ^ bit1) {
4324 u8 index;
4325
4326
4327 if (!bit0)
4328 index = i;
4329 else
4330 index = i + 1;
4331
4332
4333 if (first_free + 1 < (s8)ice_fd_pairs[index].count)
4334 return ICE_ERR_MAX_LIMIT;
4335
4336
4337 for (k = 0; k < ice_fd_pairs[index].count; k++) {
4338 es[first_free - k].prot_id =
4339 ice_fd_pairs[index].prot_id;
4340 es[first_free - k].off =
4341 ice_fd_pairs[index].off + (k * 2);
4342
4343 if (k > first_free)
4344 return ICE_ERR_OUT_OF_RANGE;
4345
4346
4347 mask_sel |= BIT(first_free - k);
4348 }
4349
4350 pair_start[index] = first_free;
4351 first_free -= ice_fd_pairs[index].count;
4352 }
4353 }
4354
4355
4356 si = hw->blk[ICE_BLK_FD].es.fvw - 1;
4357 while (si >= 0) {
4358 u8 indexes_used = 1;
4359
4360
4361#define ICE_SWAP_VALID 0x80
4362 used[si] = si | ICE_SWAP_VALID;
4363
4364 if (orig_free == ICE_FD_FV_NOT_FOUND || si <= orig_free) {
4365 si -= indexes_used;
4366 continue;
4367 }
4368
4369
4370 for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
4371 if (es[si].prot_id == ice_fd_pairs[j].prot_id &&
4372 es[si].off == ice_fd_pairs[j].off) {
4373 u8 idx;
4374
4375
4376 idx = j + ((j % 2) ? -1 : 1);
4377
4378 indexes_used = ice_fd_pairs[idx].count;
4379 for (k = 0; k < indexes_used; k++) {
4380 used[si - k] = (pair_start[idx] - k) |
4381 ICE_SWAP_VALID;
4382 }
4383
4384 break;
4385 }
4386
4387 si -= indexes_used;
4388 }
4389
4390
4391
4392
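	/* Each 32-bit GLQF_FDSWAP/GLQF_FDINSET register covers four field
	 * vector words (one byte per word); build and program them one
	 * register at a time.
	 */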
4393 for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) {
4394 u32 raw_swap = 0;
4395 u32 raw_in = 0;
4396
4397 for (k = 0; k < 4; k++) {
4398 u8 idx;
4399
4400 idx = (j * 4) + k;
4401 if (used[idx] && !(mask_sel & BIT(idx))) {
4402 raw_swap |= used[idx] << (k * BITS_PER_BYTE);
4403#define ICE_INSET_DFLT 0x9f
4404 raw_in |= ICE_INSET_DFLT << (k * BITS_PER_BYTE);
4405 }
4406 }
4407
4408
4409 wr32(hw, GLQF_FDSWAP(prof_id, j), raw_swap);
4410
4411 ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n",
4412 prof_id, j, GLQF_FDSWAP(prof_id, j), raw_swap);
4413
4414
4415 wr32(hw, GLQF_FDINSET(prof_id, j), raw_in);
4416
4417 ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n",
4418 prof_id, j, GLQF_FDINSET(prof_id, j), raw_in);
4419 }
4420
4421
4422 ice_update_fd_mask(hw, prof_id, 0);
4423
4424 return 0;
4425}
4426
4427
4428static const struct ice_ptype_attrib_info ice_ptype_attributes[] = {
4429 { ICE_GTP_PDU_EH, ICE_GTP_PDU_FLAG_MASK },
4430 { ICE_GTP_SESSION, ICE_GTP_FLAGS_MASK },
4431 { ICE_GTP_DOWNLINK, ICE_GTP_FLAGS_MASK },
4432 { ICE_GTP_UPLINK, ICE_GTP_FLAGS_MASK },
4433};
4434
4435
4436
4437
4438
4439
4440static void
4441ice_get_ptype_attrib_info(enum ice_ptype_attrib_type type,
4442 struct ice_ptype_attrib_info *info)
4443{
4444 *info = ice_ptype_attributes[type];
4445}
4446
4447
4448
4449
4450
4451
4452
4453
4454
4455static enum ice_status
4456ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
4457 const struct ice_ptype_attributes *attr, u16 attr_cnt)
4458{
4459 bool found = false;
4460 u16 i;
4461
4462 for (i = 0; i < attr_cnt; i++)
4463 if (attr[i].ptype == ptype) {
4464 found = true;
4465
4466 prof->ptg[prof->ptg_cnt] = ptg;
4467 ice_get_ptype_attrib_info(attr[i].attrib,
4468 &prof->attr[prof->ptg_cnt]);
4469
4470 if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
4471 return ICE_ERR_MAX_LIMIT;
4472 }
4473
4474 if (!found)
4475 return ICE_ERR_DOES_NOT_EXIST;
4476
4477 return 0;
4478}
4479
/**
 * ice_add_prof - add profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @id: profile tracking ID
 * @ptypes: bitmap indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
 * @attr: array of packet type attributes
 * @attr_cnt: number of elements in @attr
 * @es: extraction sequence (length of array is determined by the block)
 * @masks: per-word masks for the extraction sequence
 *
 * This registers a profile that matches a set of packet types against a
 * particular extraction sequence. The extraction sequence is staged in the
 * shadow table here; it is only pushed to hardware once the profile is
 * first associated with a VSIG (see ice_get_prof()).
 */
4496enum ice_status
4497ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
4498 const struct ice_ptype_attributes *attr, u16 attr_cnt,
4499 struct ice_fv_word *es, u16 *masks)
4500{
4501 u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
4502 DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
4503 struct ice_prof_map *prof;
4504 enum ice_status status;
4505 u8 byte = 0;
4506 u8 prof_id;
4507
4508 bitmap_zero(ptgs_used, ICE_XLT1_CNT);
4509
4510 mutex_lock(&hw->blk[blk].es.prof_map_lock);
4511
4512
4513 status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id);
4514 if (status) {
4515
4516 status = ice_alloc_prof_id(hw, blk, &prof_id);
4517 if (status)
4518 goto err_ice_add_prof;
4519 if (blk == ICE_BLK_FD) {
			/* The Flow Director extraction sequence may need
			 * adjusting so that paired source and destination
			 * fields can be swapped in hardware; do this before
			 * the sequence is written.
			 */
4527 status = ice_update_fd_swap(hw, prof_id, es);
4528 if (status)
4529 goto err_ice_add_prof;
4530 }
4531 status = ice_update_prof_masking(hw, blk, prof_id, masks);
4532 if (status)
4533 goto err_ice_add_prof;
4534
4535
4536 ice_write_es(hw, blk, prof_id, es);
4537 }
4538
4539 ice_prof_inc_ref(hw, blk, prof_id);
4540
4541
4542 prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL);
4543 if (!prof) {
4544 status = ICE_ERR_NO_MEMORY;
4545 goto err_ice_add_prof;
4546 }
4547
4548 prof->profile_cookie = id;
4549 prof->prof_id = prof_id;
4550 prof->ptg_cnt = 0;
4551 prof->context = 0;
4552
4553
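	/* Build the list of PTGs this profile maps to by walking the ptypes
	 * bitmap one byte at a time and resolving each set bit to its PTG.
	 */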
4554 while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
4555 u8 bit;
4556
4557 if (!ptypes[byte]) {
4558 bytes--;
4559 byte++;
4560 continue;
4561 }
4562
4563
4564 for_each_set_bit(bit, (unsigned long *)&ptypes[byte],
4565 BITS_PER_BYTE) {
4566 u16 ptype;
4567 u8 ptg;
4568
4569 ptype = byte * BITS_PER_BYTE + bit;
4570
4571
4572
4573
4574 if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
4575 continue;
4576
4577
4578 if (test_bit(ptg, ptgs_used))
4579 continue;
4580
4581 set_bit(ptg, ptgs_used);
4582
4583
4584
4585 status = ice_add_prof_attrib(prof, ptg, ptype,
4586 attr, attr_cnt);
4587 if (status == ICE_ERR_MAX_LIMIT)
4588 break;
4589 if (status) {
4590
4591
4592
4593 prof->ptg[prof->ptg_cnt] = ptg;
4594 prof->attr[prof->ptg_cnt].flags = 0;
4595 prof->attr[prof->ptg_cnt].mask = 0;
4596
4597 if (++prof->ptg_cnt >=
4598 ICE_MAX_PTG_PER_PROFILE)
4599 break;
4600 }
4601 }
4602
4603 bytes--;
4604 byte++;
4605 }
4606
4607 list_add(&prof->list, &hw->blk[blk].es.prof_map);
4608 status = 0;
4609
4610err_ice_add_prof:
4611 mutex_unlock(&hw->blk[blk].es.prof_map_lock);
4612 return status;
4613}
4614
4615
4616
4617
4618
4619
4620
4621
4622
4623
4624static struct ice_prof_map *
4625ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
4626{
4627 struct ice_prof_map *entry = NULL;
4628 struct ice_prof_map *map;
4629
4630 list_for_each_entry(map, &hw->blk[blk].es.prof_map, list)
4631 if (map->profile_cookie == id) {
4632 entry = map;
4633 break;
4634 }
4635
4636 return entry;
4637}
4638
4639
4640
4641
4642
4643
4644
4645static u16
4646ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
4647{
4648 u16 idx = vsig & ICE_VSIG_IDX_M, count = 0;
4649 struct ice_vsig_prof *p;
4650
4651 list_for_each_entry(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4652 list)
4653 count++;
4654
4655 return count;
4656}
4657
4658
4659
4660
4661
4662
4663
4664static enum ice_status
4665ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
4666{
4667
4668 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
4669 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
4670 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
4671 enum ice_status status;
4672
4673
4674 status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
4675 dc_msk, nm_msk);
4676 if (status)
4677 return status;
4678
4679
4680 status = ice_free_tcam_ent(hw, blk, idx);
4681
4682 return status;
4683}
4684
4685
4686
4687
4688
4689
4690
4691static enum ice_status
4692ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
4693 struct ice_vsig_prof *prof)
4694{
4695 enum ice_status status;
4696 u16 i;
4697
4698 for (i = 0; i < prof->tcam_count; i++)
4699 if (prof->tcam[i].in_use) {
4700 prof->tcam[i].in_use = false;
4701 status = ice_rel_tcam_idx(hw, blk,
4702 prof->tcam[i].tcam_idx);
4703 if (status)
4704 return ICE_ERR_HW_TABLE;
4705 }
4706
4707 return 0;
4708}
4709
/**
 * ice_rem_vsig - remove a VSIG
 * @hw: pointer to the HW struct
 * @blk: the block on which to remove the VSIG
 * @vsig: the VSIG to remove
 * @chg: the change list
 */
4717static enum ice_status
4718ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
4719 struct list_head *chg)
4720{
4721 u16 idx = vsig & ICE_VSIG_IDX_M;
4722 struct ice_vsig_vsi *vsi_cur;
4723 struct ice_vsig_prof *d, *t;
4724 enum ice_status status;
4725
4726
4727 list_for_each_entry_safe(d, t,
4728 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4729 list) {
4730 status = ice_rem_prof_id(hw, blk, d);
4731 if (status)
4732 return status;
4733
4734 list_del(&d->list);
4735 devm_kfree(ice_hw_to_dev(hw), d);
4736 }
4737
4738
4739 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
4740
4741
4742
4743 if (vsi_cur)
4744 do {
4745 struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
4746 struct ice_chs_chg *p;
4747
4748 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
4749 GFP_KERNEL);
4750 if (!p)
4751 return ICE_ERR_NO_MEMORY;
4752
4753 p->type = ICE_VSIG_REM;
4754 p->orig_vsig = vsig;
4755 p->vsig = ICE_DEFAULT_VSIG;
4756 p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;
4757
4758 list_add(&p->list_entry, chg);
4759
4760 vsi_cur = tmp;
4761 } while (vsi_cur);
4762
4763 return ice_vsig_free(hw, blk, vsig);
4764}
4765
4766
4767
4768
4769
4770
4771
4772
4773
4774static enum ice_status
4775ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
4776 struct list_head *chg)
4777{
4778 u16 idx = vsig & ICE_VSIG_IDX_M;
4779 struct ice_vsig_prof *p, *t;
4780 enum ice_status status;
4781
4782 list_for_each_entry_safe(p, t,
4783 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4784 list)
4785 if (p->profile_cookie == hdl) {
4786 if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
4787
4788 return ice_rem_vsig(hw, blk, vsig, chg);
4789
4790 status = ice_rem_prof_id(hw, blk, p);
4791 if (!status) {
4792 list_del(&p->list);
4793 devm_kfree(ice_hw_to_dev(hw), p);
4794 }
4795 return status;
4796 }
4797
4798 return ICE_ERR_DOES_NOT_EXIST;
4799}
4800
4801
4802
4803
4804
4805
4806
4807static enum ice_status
4808ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
4809{
4810 struct ice_chs_chg *del, *tmp;
4811 enum ice_status status;
4812 struct list_head chg;
4813 u16 i;
4814
4815 INIT_LIST_HEAD(&chg);
4816
4817 for (i = 1; i < ICE_MAX_VSIGS; i++)
4818 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
4819 if (ice_has_prof_vsig(hw, blk, i, id)) {
4820 status = ice_rem_prof_id_vsig(hw, blk, i, id,
4821 &chg);
4822 if (status)
4823 goto err_ice_rem_flow_all;
4824 }
4825 }
4826
4827 status = ice_upd_prof_hw(hw, blk, &chg);
4828
4829err_ice_rem_flow_all:
4830 list_for_each_entry_safe(del, tmp, &chg, list_entry) {
4831 list_del(&del->list_entry);
4832 devm_kfree(ice_hw_to_dev(hw), del);
4833 }
4834
4835 return status;
4836}
4837
/**
 * ice_rem_prof - remove profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @id: profile tracking ID
 *
 * This removes the profile from every VSIG that uses it, drops the profile
 * map entry, and releases the profile's hardware resources.
 */
4848enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
4849{
4850 struct ice_prof_map *pmap;
4851 enum ice_status status;
4852
4853 mutex_lock(&hw->blk[blk].es.prof_map_lock);
4854
4855 pmap = ice_search_prof_id(hw, blk, id);
4856 if (!pmap) {
4857 status = ICE_ERR_DOES_NOT_EXIST;
4858 goto err_ice_rem_prof;
4859 }
4860
4861
4862 status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
4863 if (status)
4864 goto err_ice_rem_prof;
4865
4866
4867 ice_prof_dec_ref(hw, blk, pmap->prof_id);
4868
4869 list_del(&pmap->list);
4870 devm_kfree(ice_hw_to_dev(hw), pmap);
4871
4872err_ice_rem_prof:
4873 mutex_unlock(&hw->blk[blk].es.prof_map_lock);
4874 return status;
4875}
4876
/**
 * ice_get_prof - queue the extraction sequence write for a profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @hdl: profile handle (tracking ID)
 * @chg: change list to append the pending write to
 *
 * If the profile's extraction sequence has not yet been written to
 * hardware, add an ICE_PTG_ES_ADD entry to the change list so it is written
 * as part of the next hardware update.
 */
4884static enum ice_status
4885ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
4886 struct list_head *chg)
4887{
4888 enum ice_status status = 0;
4889 struct ice_prof_map *map;
4890 struct ice_chs_chg *p;
4891 u16 i;
4892
4893 mutex_lock(&hw->blk[blk].es.prof_map_lock);
4894
4895 map = ice_search_prof_id(hw, blk, hdl);
4896 if (!map) {
4897 status = ICE_ERR_DOES_NOT_EXIST;
4898 goto err_ice_get_prof;
4899 }
4900
4901 for (i = 0; i < map->ptg_cnt; i++)
4902 if (!hw->blk[blk].es.written[map->prof_id]) {
4903
4904 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
4905 GFP_KERNEL);
4906 if (!p) {
4907 status = ICE_ERR_NO_MEMORY;
4908 goto err_ice_get_prof;
4909 }
4910
4911 p->type = ICE_PTG_ES_ADD;
4912 p->ptype = 0;
4913 p->ptg = map->ptg[i];
4914 p->add_ptg = 0;
4915
4916 p->add_prof = 1;
4917 p->prof_id = map->prof_id;
4918
4919 hw->blk[blk].es.written[map->prof_id] = true;
4920
4921 list_add(&p->list_entry, chg);
4922 }
4923
4924err_ice_get_prof:
4925 mutex_unlock(&hw->blk[blk].es.prof_map_lock);
4926
4927 return status;
4928}
4929
4930
4931
4932
4933
4934
4935
4936
4937
4938
4939static enum ice_status
4940ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
4941 struct list_head *lst)
4942{
4943 struct ice_vsig_prof *ent1, *ent2;
4944 u16 idx = vsig & ICE_VSIG_IDX_M;
4945
4946 list_for_each_entry(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4947 list) {
4948 struct ice_vsig_prof *p;
4949
4950
4951 p = devm_kmemdup(ice_hw_to_dev(hw), ent1, sizeof(*p),
4952 GFP_KERNEL);
4953 if (!p)
4954 goto err_ice_get_profs_vsig;
4955
4956 list_add_tail(&p->list, lst);
4957 }
4958
4959 return 0;
4960
4961err_ice_get_profs_vsig:
4962 list_for_each_entry_safe(ent1, ent2, lst, list) {
4963 list_del(&ent1->list);
4964 devm_kfree(ice_hw_to_dev(hw), ent1);
4965 }
4966
4967 return ICE_ERR_NO_MEMORY;
4968}
4969
4970
4971
4972
4973
4974
4975
4976
4977static enum ice_status
4978ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
4979 struct list_head *lst, u64 hdl)
4980{
4981 enum ice_status status = 0;
4982 struct ice_prof_map *map;
4983 struct ice_vsig_prof *p;
4984 u16 i;
4985
4986 mutex_lock(&hw->blk[blk].es.prof_map_lock);
4987 map = ice_search_prof_id(hw, blk, hdl);
4988 if (!map) {
4989 status = ICE_ERR_DOES_NOT_EXIST;
4990 goto err_ice_add_prof_to_lst;
4991 }
4992
4993 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
4994 if (!p) {
4995 status = ICE_ERR_NO_MEMORY;
4996 goto err_ice_add_prof_to_lst;
4997 }
4998
4999 p->profile_cookie = map->profile_cookie;
5000 p->prof_id = map->prof_id;
5001 p->tcam_count = map->ptg_cnt;
5002
5003 for (i = 0; i < map->ptg_cnt; i++) {
5004 p->tcam[i].prof_id = map->prof_id;
5005 p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
5006 p->tcam[i].ptg = map->ptg[i];
5007 }
5008
5009 list_add(&p->list, lst);
5010
5011err_ice_add_prof_to_lst:
5012 mutex_unlock(&hw->blk[blk].es.prof_map_lock);
5013 return status;
5014}
5015
5016
5017
5018
5019
5020
5021
5022
5023
5024static enum ice_status
5025ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
5026 struct list_head *chg)
5027{
5028 enum ice_status status;
5029 struct ice_chs_chg *p;
5030 u16 orig_vsig;
5031
5032 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
5033 if (!p)
5034 return ICE_ERR_NO_MEMORY;
5035
5036 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
5037 if (!status)
5038 status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
5039
5040 if (status) {
5041 devm_kfree(ice_hw_to_dev(hw), p);
5042 return status;
5043 }
5044
5045 p->type = ICE_VSI_MOVE;
5046 p->vsi = vsi;
5047 p->orig_vsig = orig_vsig;
5048 p->vsig = vsig;
5049
5050 list_add(&p->list_entry, chg);
5051
5052 return 0;
5053}
5054
5055
5056
5057
5058
5059
5060
5061static void
5062ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct list_head *chg)
5063{
5064 struct ice_chs_chg *pos, *tmp;
5065
5066 list_for_each_entry_safe(tmp, pos, chg, list_entry)
5067 if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
5068 list_del(&tmp->list_entry);
5069 devm_kfree(ice_hw_to_dev(hw), tmp);
5070 }
5071}
5072
/**
 * ice_prof_tcam_ena_dis - add enable or disable TCAM change
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @enable: true to enable, false to disable
 * @vsig: the VSIG of the TCAM entry
 * @tcam: pointer to the TCAM info structure of the TCAM to enable/disable
 * @chg: the change list
 *
 * This appends an enable or disable TCAM entry to the change list.
 */
5084static enum ice_status
5085ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
5086 u16 vsig, struct ice_tcam_inf *tcam,
5087 struct list_head *chg)
5088{
5089 enum ice_status status;
5090 struct ice_chs_chg *p;
5091
5092 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
5093 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
5094 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
5095
5096
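	/* When disabling, release the TCAM entry and drop any pending change
	 * that would have programmed it; when enabling, allocate an entry,
	 * write it, and queue a change-list item so it is pushed to hardware
	 * later.
	 */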
5097 if (!enable) {
5098 status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);
5099
5100
5101
5102
5103
5104 ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
5105 tcam->tcam_idx = 0;
5106 tcam->in_use = 0;
5107 return status;
5108 }
5109
	/* Entries for PTGs with no attribute mask are allocated from the
	 * bottom of the TCAM resource; entries carrying attributes come from
	 * the top.
	 */
5115 status = ice_alloc_tcam_ent(hw, blk, tcam->attr.mask == 0,
5116 &tcam->tcam_idx);
5117 if (status)
5118 return status;
5119
5120
5121 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
5122 if (!p)
5123 return ICE_ERR_NO_MEMORY;
5124
5125 status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
5126 tcam->ptg, vsig, 0, tcam->attr.flags,
5127 vl_msk, dc_msk, nm_msk);
5128 if (status)
5129 goto err_ice_prof_tcam_ena_dis;
5130
5131 tcam->in_use = 1;
5132
5133 p->type = ICE_TCAM_ADD;
5134 p->add_tcam_idx = true;
5135 p->prof_id = tcam->prof_id;
5136 p->ptg = tcam->ptg;
5137 p->vsig = 0;
5138 p->tcam_idx = tcam->tcam_idx;
5139
5140
5141 list_add(&p->list_entry, chg);
5142
5143 return 0;
5144
5145err_ice_prof_tcam_ena_dis:
5146 devm_kfree(ice_hw_to_dev(hw), p);
5147 return status;
5148}
5149
/**
 * ice_adj_prof_priorities - adjust profile based on priorities
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: the VSIG for which to adjust profile priorities
 * @chg: the change list
 */
5157static enum ice_status
5158ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
5159 struct list_head *chg)
5160{
5161 DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
5162 struct ice_vsig_prof *t;
5163 enum ice_status status;
5164 u16 idx;
5165
5166 bitmap_zero(ptgs_used, ICE_XLT1_CNT);
5167 idx = vsig & ICE_VSIG_IDX_M;
5168
	/* Priority is based on the position of the profile in the VSIG's
	 * characteristic list: entries nearer the head take precedence. Walk
	 * the list in order and give each PTG to the first profile that
	 * wants it; any later profile using the same PTG has its TCAM entry
	 * disabled, while unclaimed PTGs are (re)enabled.
	 */
5178 list_for_each_entry(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
5179 list) {
5180 u16 i;
5181
5182 for (i = 0; i < t->tcam_count; i++) {
5183
5184
5185
5186 if (test_bit(t->tcam[i].ptg, ptgs_used) &&
5187 t->tcam[i].in_use) {
5188
5189
5190
5191
5192 status = ice_prof_tcam_ena_dis(hw, blk, false,
5193 vsig,
5194 &t->tcam[i],
5195 chg);
5196 if (status)
5197 return status;
5198 } else if (!test_bit(t->tcam[i].ptg, ptgs_used) &&
5199 !t->tcam[i].in_use) {
5200
5201
5202
5203 status = ice_prof_tcam_ena_dis(hw, blk, true,
5204 vsig,
5205 &t->tcam[i],
5206 chg);
5207 if (status)
5208 return status;
5209 }
5210
5211
5212 set_bit(t->tcam[i].ptg, ptgs_used);
5213 }
5214 }
5215
5216 return 0;
5217}
5218
/**
 * ice_add_prof_id_vsig - add profile to VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: the VSIG to which this profile is to be added
 * @hdl: the profile handle indicating the profile to add
 * @rev: true to add entries to the end of the list
 * @chg: the change list
 */
5228static enum ice_status
5229ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
5230 bool rev, struct list_head *chg)
5231{
5232
5233 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
5234 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
5235 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
5236 enum ice_status status = 0;
5237 struct ice_prof_map *map;
5238 struct ice_vsig_prof *t;
5239 struct ice_chs_chg *p;
5240 u16 vsig_idx, i;
5241
5242
5243 if (ice_has_prof_vsig(hw, blk, vsig, hdl))
5244 return ICE_ERR_ALREADY_EXISTS;
5245
5246
5247 t = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*t), GFP_KERNEL);
5248 if (!t)
5249 return ICE_ERR_NO_MEMORY;
5250
5251 mutex_lock(&hw->blk[blk].es.prof_map_lock);
5252
5253 map = ice_search_prof_id(hw, blk, hdl);
5254 if (!map) {
5255 status = ICE_ERR_DOES_NOT_EXIST;
5256 goto err_ice_add_prof_id_vsig;
5257 }
5258
5259 t->profile_cookie = map->profile_cookie;
5260 t->prof_id = map->prof_id;
5261 t->tcam_count = map->ptg_cnt;
5262
5263
5264 for (i = 0; i < map->ptg_cnt; i++) {
5265 u16 tcam_idx;
5266
5267
5268 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
5269 if (!p) {
5270 status = ICE_ERR_NO_MEMORY;
5271 goto err_ice_add_prof_id_vsig;
5272 }
5273
		/* Same allocation policy as ice_prof_tcam_ena_dis(): PTGs
		 * without an attribute mask take TCAM entries from the
		 * bottom of the resource, attribute PTGs from the top.
		 */
5279 status = ice_alloc_tcam_ent(hw, blk, map->attr[i].mask == 0,
5280 &tcam_idx);
5281 if (status) {
5282 devm_kfree(ice_hw_to_dev(hw), p);
5283 goto err_ice_add_prof_id_vsig;
5284 }
5285
5286 t->tcam[i].ptg = map->ptg[i];
5287 t->tcam[i].prof_id = map->prof_id;
5288 t->tcam[i].tcam_idx = tcam_idx;
5289 t->tcam[i].attr = map->attr[i];
5290 t->tcam[i].in_use = true;
5291
5292 p->type = ICE_TCAM_ADD;
5293 p->add_tcam_idx = true;
5294 p->prof_id = t->tcam[i].prof_id;
5295 p->ptg = t->tcam[i].ptg;
5296 p->vsig = vsig;
5297 p->tcam_idx = t->tcam[i].tcam_idx;
5298
5299
5300 status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
5301 t->tcam[i].prof_id,
5302 t->tcam[i].ptg, vsig, 0, 0,
5303 vl_msk, dc_msk, nm_msk);
5304 if (status) {
5305 devm_kfree(ice_hw_to_dev(hw), p);
5306 goto err_ice_add_prof_id_vsig;
5307 }
5308
5309
5310 list_add(&p->list_entry, chg);
5311 }
5312
5313
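	/* Link the new profile characteristics onto the VSIG's property
	 * list; 'rev' selects whether the profile is appended (lowest
	 * priority) or prepended.
	 */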
5314 vsig_idx = vsig & ICE_VSIG_IDX_M;
5315 if (rev)
5316 list_add_tail(&t->list,
5317 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
5318 else
5319 list_add(&t->list,
5320 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
5321
5322 mutex_unlock(&hw->blk[blk].es.prof_map_lock);
5323 return status;
5324
5325err_ice_add_prof_id_vsig:
5326 mutex_unlock(&hw->blk[blk].es.prof_map_lock);
5327
5328 devm_kfree(ice_hw_to_dev(hw), t);
5329 return status;
5330}
5331
5332
5333
5334
5335
5336
5337
5338
5339
5340static enum ice_status
5341ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
5342 struct list_head *chg)
5343{
5344 enum ice_status status;
5345 struct ice_chs_chg *p;
5346 u16 new_vsig;
5347
5348 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
5349 if (!p)
5350 return ICE_ERR_NO_MEMORY;
5351
5352 new_vsig = ice_vsig_alloc(hw, blk);
5353 if (!new_vsig) {
5354 status = ICE_ERR_HW_TABLE;
5355 goto err_ice_create_prof_id_vsig;
5356 }
5357
5358 status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
5359 if (status)
5360 goto err_ice_create_prof_id_vsig;
5361
5362 status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
5363 if (status)
5364 goto err_ice_create_prof_id_vsig;
5365
5366 p->type = ICE_VSIG_ADD;
5367 p->vsi = vsi;
5368 p->orig_vsig = ICE_DEFAULT_VSIG;
5369 p->vsig = new_vsig;
5370
5371 list_add(&p->list_entry, chg);
5372
5373 return 0;
5374
5375err_ice_create_prof_id_vsig:
5376
5377 devm_kfree(ice_hw_to_dev(hw), p);
5378 return status;
5379}
5380
5381
5382
5383
5384
5385
5386
5387
5388
5389
5390static enum ice_status
5391ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
5392 struct list_head *lst, u16 *new_vsig,
5393 struct list_head *chg)
5394{
5395 struct ice_vsig_prof *t;
5396 enum ice_status status;
5397 u16 vsig;
5398
5399 vsig = ice_vsig_alloc(hw, blk);
5400 if (!vsig)
5401 return ICE_ERR_HW_TABLE;
5402
5403 status = ice_move_vsi(hw, blk, vsi, vsig, chg);
5404 if (status)
5405 return status;
5406
5407 list_for_each_entry(t, lst, list) {
5408
5409 status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
5410 true, chg);
5411 if (status)
5412 return status;
5413 }
5414
5415 *new_vsig = vsig;
5416
5417 return 0;
5418}
5419
5420
5421
5422
5423
5424
5425
5426
5427static bool
5428ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
5429{
5430 struct ice_vsig_prof *t;
5431 enum ice_status status;
5432 struct list_head lst;
5433
5434 INIT_LIST_HEAD(&lst);
5435
5436 t = kzalloc(sizeof(*t), GFP_KERNEL);
5437 if (!t)
5438 return false;
5439
5440 t->profile_cookie = hdl;
5441 list_add(&t->list, &lst);
5442
5443 status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);
5444
5445 list_del(&t->list);
5446 kfree(t);
5447
5448 return !status;
5449}
5450
/**
 * ice_add_prof_id_flow - add profile flow
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the VSI to enable with the profile specified by ID
 * @hdl: profile handle
 *
 * Calling this function updates the hardware tables to enable the profile
 * indicated by the handle for the given VSI. Once successfully called, the
 * flow will be enabled.
 */
5462enum ice_status
5463ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
5464{
5465 struct ice_vsig_prof *tmp1, *del1;
5466 struct ice_chs_chg *tmp, *del;
5467 struct list_head union_lst;
5468 enum ice_status status;
5469 struct list_head chg;
5470 u16 vsig;
5471
5472 INIT_LIST_HEAD(&union_lst);
5473 INIT_LIST_HEAD(&chg);
5474
5475
5476 status = ice_get_prof(hw, blk, hdl, &chg);
5477 if (status)
5478 return status;
5479
5480
5481 status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
5482 if (!status && vsig) {
5483 bool only_vsi;
5484 u16 or_vsig;
5485 u16 ref;
5486
5487
5488 or_vsig = vsig;
5489
5490
5491
5492
5493
5494 if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
5495 status = ICE_ERR_ALREADY_EXISTS;
5496 goto err_ice_add_prof_id_flow;
5497 }
5498
5499
5500 status = ice_vsig_get_ref(hw, blk, vsig, &ref);
5501 if (status)
5502 goto err_ice_add_prof_id_flow;
5503 only_vsi = (ref == 1);
5504
5505
5506
5507
5508 status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
5509 if (status)
5510 goto err_ice_add_prof_id_flow;
5511
5512 status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
5513 if (status)
5514 goto err_ice_add_prof_id_flow;
5515
5516
5517 status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
5518 if (!status) {
5519
5520 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
5521 if (status)
5522 goto err_ice_add_prof_id_flow;
5523
5524
5525
5526
5527 if (only_vsi) {
5528 status = ice_rem_vsig(hw, blk, or_vsig, &chg);
5529 if (status)
5530 goto err_ice_add_prof_id_flow;
5531 }
5532 } else if (only_vsi) {
5533
5534
5535
5536
5537
5538 status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
5539 &chg);
5540 if (status)
5541 goto err_ice_add_prof_id_flow;
5542
5543
5544 status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
5545 if (status)
5546 goto err_ice_add_prof_id_flow;
5547 } else {
5548
5549 status = ice_create_vsig_from_lst(hw, blk, vsi,
5550 &union_lst, &vsig,
5551 &chg);
5552 if (status)
5553 goto err_ice_add_prof_id_flow;
5554
5555
5556 status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
5557 if (status)
5558 goto err_ice_add_prof_id_flow;
5559 }
5560 } else {
5561
5562
5563 if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
5564
5565
5566 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
5567 if (status)
5568 goto err_ice_add_prof_id_flow;
5569 } else {
5570
5571
5572 status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
5573 &chg);
5574 if (status)
5575 goto err_ice_add_prof_id_flow;
5576 }
5577 }
5578
5579
5580 if (!status)
5581 status = ice_upd_prof_hw(hw, blk, &chg);
5582
5583err_ice_add_prof_id_flow:
5584 list_for_each_entry_safe(del, tmp, &chg, list_entry) {
5585 list_del(&del->list_entry);
5586 devm_kfree(ice_hw_to_dev(hw), del);
5587 }
5588
5589 list_for_each_entry_safe(del1, tmp1, &union_lst, list) {
5590 list_del(&del1->list);
5591 devm_kfree(ice_hw_to_dev(hw), del1);
5592 }
5593
5594 return status;
5595}
5596
5597
5598
5599
5600
5601
5602
5603static enum ice_status
5604ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl)
5605{
5606 struct ice_vsig_prof *ent, *tmp;
5607
5608 list_for_each_entry_safe(ent, tmp, lst, list)
5609 if (ent->profile_cookie == hdl) {
5610 list_del(&ent->list);
5611 devm_kfree(ice_hw_to_dev(hw), ent);
5612 return 0;
5613 }
5614
5615 return ICE_ERR_DOES_NOT_EXIST;
5616}
5617
/**
 * ice_rem_prof_id_flow - remove flow
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the VSI from which to remove the profile specified by ID
 * @hdl: profile tracking handle
 *
 * Calling this function updates the hardware tables to remove the profile
 * indicated by the handle from the given VSI. Once successfully called, the
 * flow will be disabled.
 */
5629enum ice_status
5630ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
5631{
5632 struct ice_vsig_prof *tmp1, *del1;
5633 struct ice_chs_chg *tmp, *del;
5634 struct list_head chg, copy;
5635 enum ice_status status;
5636 u16 vsig;
5637
	INIT_LIST_HEAD(&copy);
5639 INIT_LIST_HEAD(&chg);
5640
5641
5642 status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
5643 if (!status && vsig) {
5644 bool last_profile;
5645 bool only_vsi;
5646 u16 ref;
5647
5648
5649 last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
5650 status = ice_vsig_get_ref(hw, blk, vsig, &ref);
5651 if (status)
5652 goto err_ice_rem_prof_id_flow;
5653 only_vsi = (ref == 1);
5654
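	/* If this VSI is the only member of its VSIG the group can be edited
	 * in place; otherwise the remaining members must keep their current
	 * profile set, so a copy of that set minus this profile is built and
	 * the VSI is moved to a VSIG that matches it.
	 */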
5655 if (only_vsi) {
5656
5657
5658
5659
5660
5661
5662 if (last_profile) {
5663
5664
5665
5666 status = ice_rem_vsig(hw, blk, vsig, &chg);
5667 if (status)
5668 goto err_ice_rem_prof_id_flow;
5669 } else {
5670 status = ice_rem_prof_id_vsig(hw, blk, vsig,
5671 hdl, &chg);
5672 if (status)
5673 goto err_ice_rem_prof_id_flow;
5674
5675
5676 status = ice_adj_prof_priorities(hw, blk, vsig,
5677 &chg);
5678 if (status)
5679 goto err_ice_rem_prof_id_flow;
5680 }
5681
5682 } else {
5683
			status = ice_get_profs_vsig(hw, blk, vsig, &copy);
5685 if (status)
5686 goto err_ice_rem_prof_id_flow;
5687
5688
			status = ice_rem_prof_from_list(hw, &copy, hdl);
5690 if (status)
5691 goto err_ice_rem_prof_id_flow;
5692
			if (list_empty(&copy)) {
5694 status = ice_move_vsi(hw, blk, vsi,
5695 ICE_DEFAULT_VSIG, &chg);
5696 if (status)
5697 goto err_ice_rem_prof_id_flow;
5698
			} else if (!ice_find_dup_props_vsig(hw, blk, &copy,
5700 &vsig)) {
5701
5702
5703
5704
5705
5706
5707
5708 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
5709 if (status)
5710 goto err_ice_rem_prof_id_flow;
5711 } else {
5712
5713
5714
5715
5716 status = ice_create_vsig_from_lst(hw, blk, vsi,
								 &copy, &vsig,
5718 &chg);
5719 if (status)
5720 goto err_ice_rem_prof_id_flow;
5721
5722
5723 status = ice_adj_prof_priorities(hw, blk, vsig,
5724 &chg);
5725 if (status)
5726 goto err_ice_rem_prof_id_flow;
5727 }
5728 }
5729 } else {
5730 status = ICE_ERR_DOES_NOT_EXIST;
5731 }
5732
5733
5734 if (!status)
5735 status = ice_upd_prof_hw(hw, blk, &chg);
5736
5737err_ice_rem_prof_id_flow:
5738 list_for_each_entry_safe(del, tmp, &chg, list_entry) {
5739 list_del(&del->list_entry);
5740 devm_kfree(ice_hw_to_dev(hw), del);
5741 }
5742
	list_for_each_entry_safe(del1, tmp1, &copy, list) {
5744 list_del(&del1->list);
5745 devm_kfree(ice_hw_to_dev(hw), del1);
5746 }
5747
5748 return status;
5749}
5750