#include "ice_common.h"
#include "ice_flex_pipe.h"
#include "ice_flow.h"

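/* Tunnel-label prefixes scanned for in ice_init_pkg_hints(). The package
 * appends the PF number to each label (for example, "TNL_VXLAN_PF0"), which
 * is why the entries below are prefixes rather than complete label names.
 */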
static const struct ice_tunnel_type_scan tnls[] = {
	{ TNL_VXLAN, "TNL_VXLAN_PF" },
	{ TNL_GENEVE, "TNL_GENEVE_PF" },
	{ TNL_LAST, "" }
};

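/* ice_sect_lkup - lookup table of section IDs, indexed by block and section
 * type. ice_sect_id() below uses it to translate a (block, section) pair
 * into the section ID used in the package.
 */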
static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
	/* SWITCH */
	{
		ICE_SID_XLT0_SW,
		ICE_SID_XLT_KEY_BUILDER_SW,
		ICE_SID_XLT1_SW,
		ICE_SID_XLT2_SW,
		ICE_SID_PROFID_TCAM_SW,
		ICE_SID_PROFID_REDIR_SW,
		ICE_SID_FLD_VEC_SW,
		ICE_SID_CDID_KEY_BUILDER_SW,
		ICE_SID_CDID_REDIR_SW
	},

	/* ACL */
	{
		ICE_SID_XLT0_ACL,
		ICE_SID_XLT_KEY_BUILDER_ACL,
		ICE_SID_XLT1_ACL,
		ICE_SID_XLT2_ACL,
		ICE_SID_PROFID_TCAM_ACL,
		ICE_SID_PROFID_REDIR_ACL,
		ICE_SID_FLD_VEC_ACL,
		ICE_SID_CDID_KEY_BUILDER_ACL,
		ICE_SID_CDID_REDIR_ACL
	},

	/* FD */
	{
		ICE_SID_XLT0_FD,
		ICE_SID_XLT_KEY_BUILDER_FD,
		ICE_SID_XLT1_FD,
		ICE_SID_XLT2_FD,
		ICE_SID_PROFID_TCAM_FD,
		ICE_SID_PROFID_REDIR_FD,
		ICE_SID_FLD_VEC_FD,
		ICE_SID_CDID_KEY_BUILDER_FD,
		ICE_SID_CDID_REDIR_FD
	},

	/* RSS */
	{
		ICE_SID_XLT0_RSS,
		ICE_SID_XLT_KEY_BUILDER_RSS,
		ICE_SID_XLT1_RSS,
		ICE_SID_XLT2_RSS,
		ICE_SID_PROFID_TCAM_RSS,
		ICE_SID_PROFID_REDIR_RSS,
		ICE_SID_FLD_VEC_RSS,
		ICE_SID_CDID_KEY_BUILDER_RSS,
		ICE_SID_CDID_REDIR_RSS
	},

	/* PE */
	{
		ICE_SID_XLT0_PE,
		ICE_SID_XLT_KEY_BUILDER_PE,
		ICE_SID_XLT1_PE,
		ICE_SID_XLT2_PE,
		ICE_SID_PROFID_TCAM_PE,
		ICE_SID_PROFID_REDIR_PE,
		ICE_SID_FLD_VEC_PE,
		ICE_SID_CDID_KEY_BUILDER_PE,
		ICE_SID_CDID_REDIR_PE
	}
};

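/**
 * ice_sect_id - returns section ID
 * @blk: block type
 * @sect: section type
 *
 * This helper function returns the proper section ID for the given block type
 * and section type.
 */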
static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
{
	return ice_sect_lkup[blk][sect];
}

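/**
 * ice_pkg_val_buf - validate a package buffer
 * @buf: pointer to the ice buffer
 *
 * Validates the section count and data end offset of the buffer; returns the
 * buffer header on success, or NULL if either field is out of range.
 */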
static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
{
	struct ice_buf_hdr *hdr;
	u16 section_count;
	u16 data_end;

	hdr = (struct ice_buf_hdr *)buf->buf;

	/* verify data */
	section_count = le16_to_cpu(hdr->section_count);
	if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
		return NULL;

	data_end = le16_to_cpu(hdr->data_end);
	if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
		return NULL;

	return hdr;
}

/**
 * ice_find_buf_table - find the buffer table in a package segment
 * @ice_seg: pointer to the ice segment
 *
 * Returns the address of the buffer table that follows the device table and
 * NVM table within the segment.
 */
static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
{
	struct ice_nvm_table *nvms;

	nvms = (struct ice_nvm_table *)
		(ice_seg->device_table +
		 le32_to_cpu(ice_seg->device_table_count));

	return (__force struct ice_buf_table *)
		(nvms->vers + le32_to_cpu(nvms->table_count));
}

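/**
 * ice_pkg_enum_buf - enumerate buffers in a package segment
 * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
 * @state: pointer to the enum state
 *
 * This function will enumerate all the buffers in the ice segment. The first
 * call is made with the ice_seg parameter non-NULL; on subsequent calls,
 * ice_seg is set to NULL which continues the enumeration. When the function
 * returns a NULL pointer, then the end of the buffers has been reached.
 */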
static struct ice_buf_hdr *
ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
{
	if (ice_seg) {
		state->buf_table = ice_find_buf_table(ice_seg);
		if (!state->buf_table)
			return NULL;

		state->buf_idx = 0;
		return ice_pkg_val_buf(state->buf_table->buf_array);
	}

	if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count))
		return ice_pkg_val_buf(state->buf_table->buf_array +
				       state->buf_idx);
	else
		return NULL;
}

/**
 * ice_pkg_advance_sect - advance to the next section in a package buffer
 * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
 * @state: pointer to the enum state
 *
 * Advances the section index within the current buffer, moving on to the next
 * buffer when the current one is exhausted. Returns false when there are no
 * more sections.
 */
static bool
ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
{
	if (!ice_seg && !state->buf)
		return false;

	if (!ice_seg && state->buf)
		if (++state->sect_idx < le16_to_cpu(state->buf->section_count))
			return true;

	state->buf = ice_pkg_enum_buf(ice_seg, state);
	if (!state->buf)
		return false;

	/* start of new buffer, reset section index */
	state->sect_idx = 0;
	return true;
}

/**
 * ice_pkg_enum_section - enumerate sections of a given type
 * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
 * @state: pointer to the enum state
 * @sect_type: section type to enumerate
 *
 * This function will enumerate all the sections of a particular type in the
 * ice segment. The first call is made with the ice_seg parameter non-NULL;
 * on subsequent calls, ice_seg is set to NULL which continues the
 * enumeration. When the function returns a NULL pointer, then the end of the
 * matching sections has been reached.
 */
static void *
ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
		     u32 sect_type)
{
	u16 offset, size;

	if (ice_seg)
		state->type = sect_type;

	if (!ice_pkg_advance_sect(ice_seg, state))
		return NULL;

	/* scan for next matching section */
	while (state->buf->section_entry[state->sect_idx].type !=
	       cpu_to_le32(state->type))
		if (!ice_pkg_advance_sect(NULL, state))
			return NULL;

	/* validate section */
	offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
	if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
		return NULL;

	size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size);
	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
		return NULL;

	/* make sure the section fits in the buffer */
	if (offset + size > ICE_PKG_BUF_SIZE)
		return NULL;

	state->sect_type =
		le32_to_cpu(state->buf->section_entry[state->sect_idx].type);

	/* calc pointer to this section */
	state->sect = ((u8 *)state->buf) +
		le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);

	return state->sect;
}

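/**
 * ice_pkg_enum_entry - enumerate entries within sections of a given type
 * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
 * @state: pointer to the enum state
 * @sect_type: section type to enumerate
 * @offset: pointer to variable that receives the offset in the table
 *          (optional)
 * @handler: function that extracts the entry from a given section
 *
 * This function will enumerate all the entries in all the sections matching
 * the given type, calling @handler to extract each entry. The first call is
 * made with the ice_seg parameter non-NULL; on subsequent calls, ice_seg is
 * set to NULL which continues the enumeration. When the function returns a
 * NULL pointer, then the end of the entries has been reached.
 */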
static void *
ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
		   u32 sect_type, u32 *offset,
		   void *(*handler)(u32 sect_type, void *section,
				    u32 index, u32 *offset))
{
	void *entry;

	if (ice_seg) {
		if (!handler)
			return NULL;

		if (!ice_pkg_enum_section(ice_seg, state, sect_type))
			return NULL;

		state->entry_idx = 0;
		state->handler = handler;
	} else {
		state->entry_idx++;
	}

	if (!state->handler)
		return NULL;

	/* get entry */
	entry = state->handler(state->sect_type, state->sect, state->entry_idx,
			       offset);
	if (!entry) {
		/* end of a section, look for another section of this type */
		if (!ice_pkg_enum_section(NULL, state, 0))
			return NULL;

		state->entry_idx = 0;
		entry = state->handler(state->sect_type, state->sect,
				       state->entry_idx, offset);
	}

	return entry;
}

/**
 * ice_boost_tcam_handler - extract a boost TCAM entry from a section
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the boost TCAM entry to be returned
 * @offset: pointer to receive absolute offset, always 0 for boost TCAM
 *          sections
 *
 * Handler function for the boost TCAM entry enumerator (ice_pkg_enum_entry).
 */
static void *
ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
{
	struct ice_boost_tcam_section *boost;

	if (!section)
		return NULL;

	if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
		return NULL;

	if (index > ICE_MAX_BST_TCAMS_IN_BUF)
		return NULL;

	if (offset)
		*offset = 0;

	boost = section;
	if (index >= le16_to_cpu(boost->count))
		return NULL;

	return boost->tcam + index;
}

/**
 * ice_find_boost_entry - find a boost TCAM entry by address
 * @ice_seg: pointer to the ice segment (non-NULL)
 * @addr: boost TCAM address of the entry to search for
 * @entry: returns pointer to the entry
 *
 * Finds the boost TCAM entry with the specified address, or returns
 * ICE_ERR_CFG if no such entry exists.
 */
static enum ice_status
ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
		     struct ice_boost_tcam_entry **entry)
{
	struct ice_boost_tcam_entry *tcam;
	struct ice_pkg_enum state;

	memset(&state, 0, sizeof(state));

	if (!ice_seg)
		return ICE_ERR_PARAM;

	do {
		tcam = ice_pkg_enum_entry(ice_seg, &state,
					  ICE_SID_RXPARSER_BOOST_TCAM, NULL,
					  ice_boost_tcam_handler);
		if (tcam && le16_to_cpu(tcam->addr) == addr) {
			*entry = tcam;
			return 0;
		}

		ice_seg = NULL;
	} while (tcam);

	*entry = NULL;
	return ICE_ERR_CFG;
}

/**
 * ice_label_enum_handler - extract a label entry from a label section
 * @sect_type: section type (unused)
 * @section: pointer to section
 * @index: index of the label entry to be returned
 * @offset: pointer to receive absolute offset, always zero for label sections
 *
 * Handler function for the label enumerator (ice_pkg_enum_entry).
 */
static void *
ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index,
		       u32 *offset)
{
	struct ice_label_section *labels;

	if (!section)
		return NULL;

	if (index > ICE_MAX_LABELS_IN_BUF)
		return NULL;

	if (offset)
		*offset = 0;

	labels = section;
	if (index >= le16_to_cpu(labels->count))
		return NULL;

	return labels->label + index;
}

/**
 * ice_enum_labels - enumerate labels of a particular type in the package
 * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
 * @type: the section type containing the labels (0 on subsequent calls)
 * @state: pointer to the enum state
 * @value: pointer to a value that will return the label's value if found
 *
 * Enumerates the list of labels in the package. The caller starts the
 * enumeration with a non-NULL ice_seg and continues it with ice_seg set to
 * NULL; a NULL return indicates the end of the labels.
 */
static char *
ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
		u16 *value)
{
	struct ice_label *label;

	/* Check for valid label section on first call */
	if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
		return NULL;

	label = ice_pkg_enum_entry(ice_seg, state, type, NULL,
				   ice_label_enum_handler);
	if (!label)
		return NULL;

	*value = le16_to_cpu(label->value);
	return label->name;
}

/**
 * ice_init_pkg_hints - initialize tunnel hints from package labels
 * @hw: pointer to the HW structure
 * @ice_seg: pointer to the segment of the package to scan (non-NULL)
 *
 * This function scans the package and saves off relevant tunnel information
 * for driver use. The ice_seg parameter must not be NULL since the first call
 * to ice_enum_labels requires a pointer to an actual ice_seg structure.
 */
static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
{
	struct ice_pkg_enum state;
	char *label_name;
	u16 val;
	int i;

	memset(&hw->tnl, 0, sizeof(hw->tnl));
	memset(&state, 0, sizeof(state));

	if (!ice_seg)
		return;

	label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
				     &val);

	while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
		for (i = 0; tnls[i].type != TNL_LAST; i++) {
			size_t len = strlen(tnls[i].label_prefix);

			/* Look for matching label start, before continuing */
			if (strncmp(label_name, tnls[i].label_prefix, len))
				continue;

			/* Make sure this label matches our PF. Note that the
			 * PF character ('0' - '7') will be located where our
			 * prefix string's null terminator is located.
			 */
			if ((label_name[len] - '0') == hw->pf_id) {
				hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
				hw->tnl.tbl[hw->tnl.count].valid = false;
				hw->tnl.tbl[hw->tnl.count].in_use = false;
				hw->tnl.tbl[hw->tnl.count].marked = false;
				hw->tnl.tbl[hw->tnl.count].boost_addr = val;
				hw->tnl.tbl[hw->tnl.count].port = 0;
				hw->tnl.count++;
				break;
			}
		}

		label_name = ice_enum_labels(NULL, 0, &state, &val);
	}

	/* Cache the appropriate boost TCAM entry pointers */
	for (i = 0; i < hw->tnl.count; i++) {
		ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
				     &hw->tnl.tbl[i].boost_entry);
		if (hw->tnl.tbl[i].boost_entry)
			hw->tnl.tbl[i].valid = true;
	}
}

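/* Key creation: each bit of a TCAM key is encoded as a (key, key invert) bit
 * pair. The pairs below select "don't care" (match any value), "never match",
 * match-0 and match-1 behavior; ice_gen_key_word() uses them to build the two
 * halves of a key word.
 */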
#define ICE_DC_KEY	0x1	/* don't care */
#define ICE_DC_KEYINV	0x1
#define ICE_NM_KEY	0x0	/* never match */
#define ICE_NM_KEYINV	0x0
#define ICE_0_KEY	0x1	/* match 0 */
#define ICE_0_KEYINV	0x0
#define ICE_1_KEY	0x0	/* match 1 */
#define ICE_1_KEYINV	0x1

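/**
 * ice_gen_key_word - generate 16-bits of a key/mask word
 * @val: the value
 * @valid: valid bits mask (change only the valid bits)
 * @dont_care: don't care mask
 * @nvr_mtch: never match mask
 * @key: pointer to where the key byte will be stored
 * @key_inv: pointer to where the key invert byte will be stored
 *
 * This function generates 16-bits from an 8-bit value, an 8-bit don't care
 * mask and an 8-bit never match mask. The 16-bits of output are divided into
 * 8 bits of key and 8 bits of key invert.
 *
 * Error case: a bit cannot be flagged as both don't care and never match
 * simultaneously; that combination returns ICE_ERR_CFG.
 */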
static enum ice_status
ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
		 u8 *key_inv)
{
	u8 in_key = *key, in_key_inv = *key_inv;
	u8 i;

	/* 'dont_care' and 'nvr_mtch' masks cannot overlap */
	if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
		return ICE_ERR_CFG;

	*key = 0;
	*key_inv = 0;

	/* encode the 8 bits into 8-bit key and 8-bit key invert */
	for (i = 0; i < 8; i++) {
		*key >>= 1;
		*key_inv >>= 1;

		if (!(valid & 0x1)) { /* change only valid bits */
			*key |= (in_key & 0x1) << 7;
			*key_inv |= (in_key_inv & 0x1) << 7;
		} else if (dont_care & 0x1) { /* don't care bit */
			*key |= ICE_DC_KEY << 7;
			*key_inv |= ICE_DC_KEYINV << 7;
		} else if (nvr_mtch & 0x1) { /* never match bit */
			*key |= ICE_NM_KEY << 7;
			*key_inv |= ICE_NM_KEYINV << 7;
		} else if (val & 0x01) { /* exact 1 match */
			*key |= ICE_1_KEY << 7;
			*key_inv |= ICE_1_KEYINV << 7;
		} else { /* exact 0 match */
			*key |= ICE_0_KEY << 7;
			*key_inv |= ICE_0_KEYINV << 7;
		}

		dont_care >>= 1;
		nvr_mtch >>= 1;
		valid >>= 1;
		val >>= 1;
		in_key >>= 1;
		in_key_inv >>= 1;
	}

	return 0;
}

/**
 * ice_bits_max_set - determine if the number of bits set is within a maximum
 * @mask: pointer to the byte array which is the mask
 * @size: the number of bytes in the mask
 * @max: the max number of set bits
 *
 * This function determines if there are at most 'max' number of bits set in
 * the given byte array. Returns true if the number of bits is less than or
 * equal to 'max', otherwise false.
 */
static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
{
	u16 count = 0;
	u16 i;

	/* check each byte */
	for (i = 0; i < size; i++) {
		/* if 0, go to next byte */
		if (!mask[i])
			continue;

		/* We know there is at least one set bit in this byte because
		 * of the above check; if we have already found 'max' set bits
		 * then we can return failure now.
		 */
		if (count == max)
			return false;

		/* count the bits in this byte, checking threshold */
		count += hweight8(mask[i]);
		if (count > max)
			return false;
	}

	return true;
}

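/**
 * ice_set_key - generate a variable sized key with multiples of 16-bits
 * @key: pointer to where the key will be stored
 * @size: the size of the complete key in bytes (must be even)
 * @val: array of 8-bit values that makes up the value portion of the key
 * @upd: array to be used to update the key; NULL means update all bits
 * @dc: array to be used to mark bits as don't care; NULL means none
 * @nm: array to be used to note never match; NULL means none
 * @off: the offset of the first byte in the key to update
 * @len: the number of bytes in the key update
 *
 * This function generates a key from a value, a don't care mask and a never
 * match mask. upd, dc, and nm are optional parameters and can be NULL. The
 * first half of the key holds the key bytes and the second half the key
 * invert bytes, as produced by ice_gen_key_word().
 */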
static enum ice_status
ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
	    u16 len)
{
	u16 half_size;
	u16 i;

	/* size must be a multiple of 2 bytes. */
	if (size % 2)
		return ICE_ERR_CFG;

	half_size = size / 2;
	if (off + len > half_size)
		return ICE_ERR_CFG;

	/* Make sure at most one bit is set in the never match mask */
#define ICE_NVR_MTCH_BITS_MAX	1
	if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
		return ICE_ERR_CFG;

	for (i = 0; i < len; i++)
		if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
				     dc ? dc[i] : 0, nm ? nm[i] : 0,
				     key + off + i, key + half_size + off + i))
			return ICE_ERR_CFG;

	return 0;
}

/**
 * ice_acquire_global_cfg_lock - acquire the global config lock
 * @hw: pointer to the HW structure
 * @access: access type (read or write)
 *
 * Requests ownership of the global config lock; also takes the software-side
 * mutex so that only one caller downloads a package at a time. A return value
 * of ICE_ERR_AQ_NO_WORK means the package has already been downloaded.
 */
static enum ice_status
ice_acquire_global_cfg_lock(struct ice_hw *hw,
			    enum ice_aq_res_access_type access)
{
	enum ice_status status;

	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);

	if (!status)
		mutex_lock(&ice_global_cfg_lock_sw);
	else if (status == ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_PKG,
			  "Global config lock: No work to do\n");

	return status;
}

/**
 * ice_release_global_cfg_lock - release the global config lock
 * @hw: pointer to the HW structure
 */
static void ice_release_global_cfg_lock(struct ice_hw *hw)
{
	mutex_unlock(&ice_global_cfg_lock_sw);
	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
}

/**
 * ice_acquire_change_lock - acquire the change lock
 * @hw: pointer to the HW structure
 * @access: access type (read or write)
 *
 * This function will request ownership of the change lock.
 */
static enum ice_status
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
	return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
			       ICE_CHANGE_LOCK_TIMEOUT);
}

/**
 * ice_release_change_lock - release the change lock
 * @hw: pointer to the HW structure
 */
static void ice_release_change_lock(struct ice_hw *hw)
{
	ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
}

/**
 * ice_aq_download_pkg - send a Download Package AQ command
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package buffer to transfer
 * @buf_size: the size of the package buffer
 * @last_buf: last buffer indicator
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cd: pointer to command details structure or NULL
 */
static enum ice_status
ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
		    u16 buf_size, bool last_buf, u32 *error_offset,
		    u32 *error_info, struct ice_sq_cd *cd)
{
	struct ice_aqc_download_pkg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	if (error_offset)
		*error_offset = 0;
	if (error_info)
		*error_info = 0;

	cmd = &desc.params.download_pkg;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	if (last_buf)
		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;

	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
	if (status == ICE_ERR_AQ_ERROR) {
		/* Read error from buffer only when the FW returned an error */
		struct ice_aqc_download_pkg_resp *resp;

		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
		if (error_offset)
			*error_offset = le32_to_cpu(resp->error_offset);
		if (error_info)
			*error_info = le32_to_cpu(resp->error_info);
	}

	return status;
}

/**
 * ice_aq_update_pkg - send an Update Package AQ command
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package cmd buffer
 * @buf_size: the size of the package cmd buffer
 * @last_buf: last buffer indicator
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cd: pointer to command details structure or NULL
 */
static enum ice_status
ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
		  bool last_buf, u32 *error_offset, u32 *error_info,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_download_pkg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	if (error_offset)
		*error_offset = 0;
	if (error_info)
		*error_info = 0;

	cmd = &desc.params.download_pkg;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	if (last_buf)
		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;

	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
	if (status == ICE_ERR_AQ_ERROR) {
		/* Read error from buffer only when the FW returned an error */
		struct ice_aqc_download_pkg_resp *resp;

		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
		if (error_offset)
			*error_offset = le32_to_cpu(resp->error_offset);
		if (error_info)
			*error_info = le32_to_cpu(resp->error_info);
	}

	return status;
}

/**
 * ice_find_seg_in_pkg - find a segment type in the package headers
 * @hw: pointer to the hardware structure
 * @seg_type: the segment type to search for
 * @pkg_hdr: pointer to the package header to be searched
 *
 * This function searches a package file for a particular segment type. On
 * success it returns a pointer to the segment header, otherwise it will
 * return NULL.
 */
static struct ice_generic_seg_hdr *
ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
		    struct ice_pkg_hdr *pkg_hdr)
{
	u32 i;

	ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
		  pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
		  pkg_hdr->pkg_format_ver.update,
		  pkg_hdr->pkg_format_ver.draft);

	/* Search all package segments for the requested segment type */
	for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
		struct ice_generic_seg_hdr *seg;

		seg = (struct ice_generic_seg_hdr *)
			((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i]));

		if (le32_to_cpu(seg->seg_type) == seg_type)
			return seg;
	}

	return NULL;
}

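/**
 * ice_update_pkg - update the package on the hardware
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Obtains the change lock and then updates the package; the lock is released
 * before returning, whether or not an individual buffer update failed.
 */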
static enum ice_status
ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
	enum ice_status status;
	u32 offset, info, i;

	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
	if (status)
		return status;

	for (i = 0; i < count; i++) {
		struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
		bool last = ((i + 1) == count);

		status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end),
					   last, &offset, &info, NULL);

		if (status) {
			ice_debug(hw, ICE_DBG_PKG,
				  "Update pkg failed: err %d off %d inf %d\n",
				  status, offset, info);
			break;
		}
	}

	ice_release_change_lock(hw);

	return status;
}

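/**
 * ice_dwnld_cfg_bufs - download package config buffers
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Obtains the global config lock and downloads the package configuration
 * buffers to the firmware. Metadata buffers are skipped, and the first
 * metadata buffer found indicates that the rest of the buffers are all
 * metadata buffers.
 */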
static enum ice_status
ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
	enum ice_status status;
	struct ice_buf_hdr *bh;
	u32 offset, info, i;

	if (!bufs || !count)
		return ICE_ERR_PARAM;

	/* If the first buffer's first section has its metadata bit set, then
	 * this is the metadata buffer and there is nothing to download.
	 */
	bh = (struct ice_buf_hdr *)bufs;
	if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
		return 0;

	/* reset pkg_dwnld_status in case this function is called in the
	 * reset/rebuild flow
	 */
	hw->pkg_dwnld_status = ICE_AQ_RC_OK;

	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
	if (status) {
		if (status == ICE_ERR_AQ_NO_WORK)
			hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
		else
			hw->pkg_dwnld_status = hw->adminq.sq_last_status;
		return status;
	}

	for (i = 0; i < count; i++) {
		bool last = ((i + 1) == count);

		if (!last) {
			/* check next buffer for metadata flag */
			bh = (struct ice_buf_hdr *)(bufs + i + 1);

			/* A set metadata flag in the next buffer will signal
			 * that the current buffer will be the last buffer
			 * downloaded
			 */
			if (le16_to_cpu(bh->section_count))
				if (le32_to_cpu(bh->section_entry[0].type) &
				    ICE_METADATA_BUF)
					last = true;
		}

		bh = (struct ice_buf_hdr *)(bufs + i);

		status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
					     &offset, &info, NULL);

		/* Save AQ status from download package */
		hw->pkg_dwnld_status = hw->adminq.sq_last_status;
		if (status) {
			ice_debug(hw, ICE_DBG_PKG,
				  "Pkg download failed: err %d off %d inf %d\n",
				  status, offset, info);

			break;
		}

		if (last)
			break;
	}

	ice_release_global_cfg_lock(hw);

	return status;
}

/**
 * ice_aq_get_pkg_info_list - get the package info list
 * @hw: pointer to the hardware structure
 * @pkg_info: the buffer which will receive the information list
 * @buf_size: the size of the pkg_info information buffer
 * @cd: pointer to command details structure or NULL
 */
static enum ice_status
ice_aq_get_pkg_info_list(struct ice_hw *hw,
			 struct ice_aqc_get_pkg_info_resp *pkg_info,
			 u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);

	return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
}

/**
 * ice_download_pkg - download the package to the hardware
 * @hw: pointer to the hardware structure
 * @ice_seg: pointer to the segment of the package to be downloaded
 *
 * Handles the download of a complete package.
 */
static enum ice_status
ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
{
	struct ice_buf_table *ice_buf_tbl;

	ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
		  ice_seg->hdr.seg_format_ver.major,
		  ice_seg->hdr.seg_format_ver.minor,
		  ice_seg->hdr.seg_format_ver.update,
		  ice_seg->hdr.seg_format_ver.draft);

	ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
		  le32_to_cpu(ice_seg->hdr.seg_type),
		  le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);

	ice_buf_tbl = ice_find_buf_table(ice_seg);

	ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
		  le32_to_cpu(ice_buf_tbl->buf_count));

	return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
				  le32_to_cpu(ice_buf_tbl->buf_count));
}

/**
 * ice_init_pkg_info - store package version and name from the package header
 * @hw: pointer to the hardware structure
 * @pkg_hdr: pointer to the driver's package header
 *
 * Saves off the package details into the HW structure.
 */
static enum ice_status
ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
	struct ice_global_metadata_seg *meta_seg;
	struct ice_generic_seg_hdr *seg_hdr;

	if (!pkg_hdr)
		return ICE_ERR_PARAM;

	meta_seg = (struct ice_global_metadata_seg *)
		ice_find_seg_in_pkg(hw, SEGMENT_TYPE_METADATA, pkg_hdr);
	if (meta_seg) {
		hw->pkg_ver = meta_seg->pkg_ver;
		memcpy(hw->pkg_name, meta_seg->pkg_name, sizeof(hw->pkg_name));

		ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
			  meta_seg->pkg_ver.major, meta_seg->pkg_ver.minor,
			  meta_seg->pkg_ver.update, meta_seg->pkg_ver.draft,
			  meta_seg->pkg_name);
	} else {
		ice_debug(hw, ICE_DBG_INIT,
			  "Did not find metadata segment in driver package\n");
		return ICE_ERR_CFG;
	}

	seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
	if (seg_hdr) {
		hw->ice_pkg_ver = seg_hdr->seg_format_ver;
		memcpy(hw->ice_pkg_name, seg_hdr->seg_id,
		       sizeof(hw->ice_pkg_name));

		ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
			  seg_hdr->seg_format_ver.major,
			  seg_hdr->seg_format_ver.minor,
			  seg_hdr->seg_format_ver.update,
			  seg_hdr->seg_format_ver.draft,
			  seg_hdr->seg_id);
	} else {
		ice_debug(hw, ICE_DBG_INIT,
			  "Did not find ice segment in driver package\n");
		return ICE_ERR_CFG;
	}

	return 0;
}

/**
 * ice_get_pkg_info - store the details of the active package in the HW struct
 * @hw: pointer to the hardware structure
 *
 * Queries the firmware for the package info list and saves the version,
 * track ID and name of the active package.
 */
static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
{
	struct ice_aqc_get_pkg_info_resp *pkg_info;
	enum ice_status status;
	u16 size;
	u32 i;

	size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
	pkg_info = kzalloc(size, GFP_KERNEL);
	if (!pkg_info)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
	if (status)
		goto init_pkg_free_alloc;

	for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
#define ICE_PKG_FLAG_COUNT	4
		char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
		u8 place = 0;

		if (pkg_info->pkg_info[i].is_active) {
			flags[place++] = 'A';
			hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
			hw->active_track_id =
				le32_to_cpu(pkg_info->pkg_info[i].track_id);
			memcpy(hw->active_pkg_name,
			       pkg_info->pkg_info[i].name,
			       sizeof(pkg_info->pkg_info[i].name));
			hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
		}
		if (pkg_info->pkg_info[i].is_active_at_boot)
			flags[place++] = 'B';
		if (pkg_info->pkg_info[i].is_modified)
			flags[place++] = 'M';
		if (pkg_info->pkg_info[i].is_in_nvm)
			flags[place++] = 'N';

		ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
			  i, pkg_info->pkg_info[i].ver.major,
			  pkg_info->pkg_info[i].ver.minor,
			  pkg_info->pkg_info[i].ver.update,
			  pkg_info->pkg_info[i].ver.draft,
			  pkg_info->pkg_info[i].name, flags);
	}

init_pkg_free_alloc:
	kfree(pkg_info);

	return status;
}

/**
 * ice_verify_pkg - verify package layout
 * @pkg: pointer to the package buffer
 * @len: size of the package buffer
 *
 * Verifies various attributes of the package file, including length, format
 * version, and the requirement of at least one segment.
 */
static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
{
	u32 seg_count;
	u32 i;

	if (len < struct_size(pkg, seg_offset, 1))
		return ICE_ERR_BUF_TOO_SHORT;

	if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
	    pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
	    pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
	    pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
		return ICE_ERR_CFG;

	/* pkg must have at least one segment */
	seg_count = le32_to_cpu(pkg->seg_count);
	if (seg_count < 1)
		return ICE_ERR_CFG;

	/* make sure segment array fits in package length */
	if (len < struct_size(pkg, seg_offset, seg_count))
		return ICE_ERR_BUF_TOO_SHORT;

	/* all segments must fit within length */
	for (i = 0; i < seg_count; i++) {
		u32 off = le32_to_cpu(pkg->seg_offset[i]);
		struct ice_generic_seg_hdr *seg;

		/* segment header must fit */
		if (len < off + sizeof(*seg))
			return ICE_ERR_BUF_TOO_SHORT;

		seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);

		/* segment body must fit */
		if (len < off + le32_to_cpu(seg->seg_size))
			return ICE_ERR_BUF_TOO_SHORT;
	}

	return 0;
}

/**
 * ice_free_seg - free package segment pointer
 * @hw: pointer to the hardware structure
 *
 * Frees the driver's copy of the package, if one was made, and clears the
 * segment pointer.
 */
void ice_free_seg(struct ice_hw *hw)
{
	if (hw->pkg_copy) {
		devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy);
		hw->pkg_copy = NULL;
		hw->pkg_size = 0;
	}
	hw->seg = NULL;
}

/**
 * ice_init_pkg_regs - initialize additional package registers
 * @hw: pointer to the hardware structure
 */
static void ice_init_pkg_regs(struct ice_hw *hw)
{
#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
#define ICE_SW_BLK_IDX	0

	/* setup Switch block input mask, which is 48-bits in two parts */
	wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
	wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
}

/**
 * ice_chk_pkg_version - check package version for compatibility with driver
 * @pkg_ver: pointer to a version structure to check
 *
 * Check to make sure that the package about to be downloaded is compatible
 * with the driver. To be compatible, the major and minor components of the
 * package version must match our ICE_PKG_SUPP_VER_MAJ and
 * ICE_PKG_SUPP_VER_MNR definitions.
 */
static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
{
	if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
	    pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
		return ICE_ERR_NOT_SUPPORTED;

	return 0;
}

/**
 * ice_chk_pkg_compat - check if the package is compatible with the device
 * @hw: pointer to the hardware structure
 * @ospkg: pointer to the package header
 * @seg: pointer to receive the ice segment found in the package
 *
 * This function checks the package version compatibility with the driver
 * and with the NVM.
 */
static enum ice_status
ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
		   struct ice_seg **seg)
{
	struct ice_aqc_get_pkg_info_resp *pkg;
	enum ice_status status;
	u16 size;
	u32 i;

	/* Check package version compatibility */
	status = ice_chk_pkg_version(&hw->pkg_ver);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
		return status;
	}

	/* find ICE segment in given package */
	*seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
						     ospkg);
	if (!*seg) {
		ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
		return ICE_ERR_CFG;
	}

	/* Check if FW is compatible with the OS package */
	size = struct_size(pkg, pkg_info, ICE_PKG_CNT);
	pkg = kzalloc(size, GFP_KERNEL);
	if (!pkg)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL);
	if (status)
		goto fw_ddp_compat_free_alloc;

	for (i = 0; i < le32_to_cpu(pkg->count); i++) {
		/* loop through all active packages in the NVM */
		if (!pkg->pkg_info[i].is_in_nvm)
			continue;
		if ((*seg)->hdr.seg_format_ver.major !=
		    pkg->pkg_info[i].ver.major ||
		    (*seg)->hdr.seg_format_ver.minor >
		    pkg->pkg_info[i].ver.minor) {
			status = ICE_ERR_FW_DDP_MISMATCH;
			ice_debug(hw, ICE_DBG_INIT,
				  "OS package is not compatible with NVM.\n");
		}
		/* done processing NVM package so break */
		break;
	}
fw_ddp_compat_free_alloc:
	kfree(pkg);
	return status;
}

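/**
 * ice_init_pkg - initialize/download package
 * @hw: pointer to the hardware structure
 * @buf: pointer to the package buffer
 * @len: size of the package buffer
 *
 * This function initializes a package. The package contains HW tables
 * required to do packet processing. First, the function extracts package
 * information such as version, then finds the ice configuration segment
 * within the package and checks its compatibility. Next, the package is
 * downloaded to the firmware; a "no work" result is treated as success since
 * it means the package was already loaded. Finally, on success the
 * driver-side registers and tables are initialized from the package.
 *
 * Note: the supplied buffer must remain valid for the life of the package.
 * Callers that cannot guarantee this should use ice_copy_and_init_pkg().
 */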
enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
{
	struct ice_pkg_hdr *pkg;
	enum ice_status status;
	struct ice_seg *seg;

	if (!buf || !len)
		return ICE_ERR_PARAM;

	pkg = (struct ice_pkg_hdr *)buf;
	status = ice_verify_pkg(pkg, len);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
			  status);
		return status;
	}

	/* initialize package info */
	status = ice_init_pkg_info(hw, pkg);
	if (status)
		return status;

	/* before downloading the package, check package version for
	 * compatibility with driver
	 */
	status = ice_chk_pkg_compat(hw, pkg, &seg);
	if (status)
		return status;

	/* initialize package hints and then download package */
	ice_init_pkg_hints(hw, seg);
	status = ice_download_pkg(hw, seg);
	if (status == ICE_ERR_AQ_NO_WORK) {
		ice_debug(hw, ICE_DBG_INIT,
			  "package previously loaded - no work.\n");
		status = 0;
	}

	/* Get information on the package currently loaded in HW, then make
	 * sure the driver is compatible with this version.
	 */
	if (!status) {
		status = ice_get_pkg_info(hw);
		if (!status)
			status = ice_chk_pkg_version(&hw->active_pkg_ver);
	}

	if (!status) {
		hw->seg = seg;
		/* on successful package download update other required
		 * registers to support the package and fill HW tables
		 * with package content
		 */
		ice_init_pkg_regs(hw);
		ice_fill_blk_tbls(hw);
	} else {
		ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
			  status);
	}

	return status;
}

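/**
 * ice_copy_and_init_pkg - initialize/download a copy of the package
 * @hw: pointer to the hardware structure
 * @buf: pointer to the package buffer
 * @len: size of the package buffer
 *
 * This function copies the package buffer, and then calls ice_init_pkg() to
 * initialize the copied package contents. The copying is necessary if the
 * package buffer supplied is constant, or if the memory may disappear shortly
 * after calling this function. On success the copy is saved in hw->pkg_copy
 * and freed later by ice_free_seg(); on failure it is freed immediately.
 */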
enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
{
	enum ice_status status;
	u8 *buf_copy;

	if (!buf || !len)
		return ICE_ERR_PARAM;

	buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);

	status = ice_init_pkg(hw, buf_copy, len);
	if (status) {
		/* Free the copy, since we failed to initialize the package */
		devm_kfree(ice_hw_to_dev(hw), buf_copy);
	} else {
		/* Track the copied pkg so we can free it later */
		hw->pkg_copy = buf_copy;
		hw->pkg_size = len;
	}

	return status;
}

/**
 * ice_pkg_buf_alloc - allocate a new buffer for package builds
 * @hw: pointer to the HW structure
 *
 * Allocates a package build buffer and initializes its header so that
 * sections can be reserved and allocated in it. Must be freed with
 * ice_pkg_buf_free().
 */
static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
{
	struct ice_buf_build *bld;
	struct ice_buf_hdr *buf;

	bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL);
	if (!bld)
		return NULL;

	buf = (struct ice_buf_hdr *)bld;
	buf->data_end = cpu_to_le16(offsetof(struct ice_buf_hdr,
					     section_entry));
	return bld;
}

/**
 * ice_pkg_buf_free - free a package build buffer
 * @hw: pointer to the HW structure
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 */
static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
{
	devm_kfree(ice_hw_to_dev(hw), bld);
}

/**
 * ice_pkg_buf_reserve_section - reserve entries in the section table
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 * @count: the number of sections to reserve
 *
 * Reserves one or more section table entries in a package buffer. This
 * routine can be called multiple times as long as they are made before
 * calling ice_pkg_buf_alloc_section(). Once a section has been allocated,
 * the number of reserved sections can no longer be increased; not using all
 * reserved sections is fine, but will result in some wasted space in the
 * buffer.
 */
static enum ice_status
ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
{
	struct ice_buf_hdr *buf;
	u16 section_count;
	u16 data_end;

	if (!bld)
		return ICE_ERR_PARAM;

	buf = (struct ice_buf_hdr *)&bld->buf;

	/* already an active section, can't increase table size */
	section_count = le16_to_cpu(buf->section_count);
	if (section_count > 0)
		return ICE_ERR_CFG;

	if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
		return ICE_ERR_CFG;
	bld->reserved_section_table_entries += count;

	data_end = le16_to_cpu(buf->data_end) +
		(count * sizeof(buf->section_entry[0]));
	buf->data_end = cpu_to_le16(data_end);

	return 0;
}

/**
 * ice_pkg_buf_alloc_section - allocate a full section in the package buffer
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 * @type: the section type value
 * @size: the size of the section to reserve (in bytes)
 *
 * Reserves memory in the buffer for a section's content and updates the
 * buffer's necessary data fields to hold the section's size and type values.
 * The section contents must be filled in by the caller.
 */
static void *
ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
{
	struct ice_buf_hdr *buf;
	u16 sect_count;
	u16 data_end;

	if (!bld || !type || !size)
		return NULL;

	buf = (struct ice_buf_hdr *)&bld->buf;

	/* check for enough space left in buffer */
	data_end = le16_to_cpu(buf->data_end);

	/* section start must align on 4 byte boundary */
	data_end = ALIGN(data_end, 4);

	if ((data_end + size) > ICE_MAX_S_DATA_END)
		return NULL;

	/* check for more available section table entries */
	sect_count = le16_to_cpu(buf->section_count);
	if (sect_count < bld->reserved_section_table_entries) {
		void *section_ptr = ((u8 *)buf) + data_end;

		buf->section_entry[sect_count].offset = cpu_to_le16(data_end);
		buf->section_entry[sect_count].size = cpu_to_le16(size);
		buf->section_entry[sect_count].type = cpu_to_le32(type);

		data_end += size;
		buf->data_end = cpu_to_le16(data_end);

		buf->section_count = cpu_to_le16(sect_count + 1);
		return section_ptr;
	}

	/* no free section table entries */
	return NULL;
}

/**
 * ice_pkg_buf_get_active_sections - get the count of active sections
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Returns the number of active sections. Before using the package buffer in
 * an update package command, the caller should make sure that there is at
 * least one active section; otherwise the buffer is not legal and should not
 * be used.
 */
static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
{
	struct ice_buf_hdr *buf;

	if (!bld)
		return 0;

	buf = (struct ice_buf_hdr *)&bld->buf;
	return le16_to_cpu(buf->section_count);
}

/**
 * ice_pkg_buf - get the address of the buffer within the build
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Return a pointer to the buffer's header.
 */
static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
{
	if (!bld)
		return NULL;

	return &bld->buf;
}

/**
 * ice_tunnel_port_in_use_hlpr - helper to check if a port is a tunnel port
 * @hw: pointer to the HW structure
 * @port: port to search for
 * @index: optionally returns the index of the matching tunnel entry
 *
 * Returns whether a port is already in use as a tunnel. The caller must hold
 * hw->tnl_lock.
 */
static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index)
{
	u16 i;

	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
		if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
			if (index)
				*index = i;
			return true;
		}

	return false;
}

/**
 * ice_tunnel_port_in_use - check if a port is in use as a tunnel
 * @hw: pointer to the HW structure
 * @port: port to search for
 * @index: optionally returns the index of the matching tunnel entry
 *
 * Returns whether a port is already in use as a tunnel, taking hw->tnl_lock.
 */
bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)
{
	bool res;

	mutex_lock(&hw->tnl_lock);
	res = ice_tunnel_port_in_use_hlpr(hw, port, index);
	mutex_unlock(&hw->tnl_lock);

	return res;
}

/**
 * ice_find_free_tunnel_entry - find a free tunnel entry of the given type
 * @hw: pointer to the HW structure
 * @type: tunnel type
 * @index: optionally returns the index of the free tunnel entry
 *
 * Returns whether there is a valid, unused tunnel entry of the given type.
 * The caller must hold hw->tnl_lock.
 */
static bool
ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type,
			   u16 *index)
{
	u16 i;

	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
		if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use &&
		    hw->tnl.tbl[i].type == type) {
			if (index)
				*index = i;
			return true;
		}

	return false;
}

/**
 * ice_get_open_tunnel_port - retrieve an open tunnel port
 * @hw: pointer to the HW structure
 * @type: tunnel type (TNL_ALL will return any open port)
 * @port: returns open port
 */
bool
ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
			 u16 *port)
{
	bool res = false;
	u16 i;

	mutex_lock(&hw->tnl_lock);

	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
		    (type == TNL_ALL || hw->tnl.tbl[i].type == type)) {
			*port = hw->tnl.tbl[i].port;
			res = true;
			break;
		}

	mutex_unlock(&hw->tnl_lock);

	return res;
}

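/**
 * ice_create_tunnel - create tunnel
 * @hw: pointer to the HW structure
 * @type: type of tunnel
 * @port: port of tunnel to create
 *
 * Create a tunnel by updating the parse graph in the parser. We do that by
 * creating a package buffer with the tunnel info and issuing an update
 * package command. If the port is already in use, only its reference count
 * is incremented.
 */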
enum ice_status
ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
{
	struct ice_boost_tcam_section *sect_rx, *sect_tx;
	enum ice_status status = ICE_ERR_MAX_LIMIT;
	struct ice_buf_build *bld;
	u16 index;

	mutex_lock(&hw->tnl_lock);

	if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) {
		hw->tnl.tbl[index].ref++;
		status = 0;
		goto ice_create_tunnel_end;
	}

	if (!ice_find_free_tunnel_entry(hw, type, &index)) {
		status = ICE_ERR_OUT_OF_RANGE;
		goto ice_create_tunnel_end;
	}

	bld = ice_pkg_buf_alloc(hw);
	if (!bld) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_create_tunnel_end;
	}

	/* allocate 2 sections, one for Rx parser, one for Tx parser */
	if (ice_pkg_buf_reserve_section(bld, 2))
		goto ice_create_tunnel_err;

	sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
					    struct_size(sect_rx, tcam, 1));
	if (!sect_rx)
		goto ice_create_tunnel_err;
	sect_rx->count = cpu_to_le16(1);

	sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
					    struct_size(sect_tx, tcam, 1));
	if (!sect_tx)
		goto ice_create_tunnel_err;
	sect_tx->count = cpu_to_le16(1);

	/* copy original boost entry to update package buffer */
	memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
	       sizeof(*sect_rx->tcam));

	/* over-write the never-match dest port key bits with the encoded port
	 * bits
	 */
	ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
		    (u8 *)&port, NULL, NULL, NULL,
		    (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key),
		    sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));

	/* exact copy of entry to Tx section entry */
	memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));

	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
	if (!status) {
		hw->tnl.tbl[index].port = port;
		hw->tnl.tbl[index].in_use = true;
		hw->tnl.tbl[index].ref = 1;
	}

ice_create_tunnel_err:
	ice_pkg_buf_free(hw, bld);

ice_create_tunnel_end:
	mutex_unlock(&hw->tnl_lock);

	return status;
}

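/**
 * ice_destroy_tunnel - destroy tunnel(s)
 * @hw: pointer to the HW structure
 * @port: port of tunnel to destroy (ignored if the all parameter is true)
 * @all: flag that states to destroy all tunnels
 *
 * Destroys a tunnel or all tunnels by creating an update package buffer
 * containing the original boost TCAM entries and issuing an update package
 * command. If the port has more than one reference, only the reference count
 * is decremented.
 */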
enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
{
	struct ice_boost_tcam_section *sect_rx, *sect_tx;
	enum ice_status status = ICE_ERR_MAX_LIMIT;
	struct ice_buf_build *bld;
	u16 count = 0;
	u16 index;
	u16 size;
	u16 i;

	mutex_lock(&hw->tnl_lock);

	if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index))
		if (hw->tnl.tbl[index].ref > 1) {
			hw->tnl.tbl[index].ref--;
			status = 0;
			goto ice_destroy_tunnel_end;
		}

	/* determine count */
	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
		    (all || hw->tnl.tbl[i].port == port))
			count++;

	if (!count) {
		status = ICE_ERR_PARAM;
		goto ice_destroy_tunnel_end;
	}

	/* size of section - there is at least one entry */
	size = struct_size(sect_rx, tcam, count);

	bld = ice_pkg_buf_alloc(hw);
	if (!bld) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_destroy_tunnel_end;
	}

	/* allocate 2 sections, one for Rx parser, one for Tx parser */
	if (ice_pkg_buf_reserve_section(bld, 2))
		goto ice_destroy_tunnel_err;

	sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
					    size);
	if (!sect_rx)
		goto ice_destroy_tunnel_err;
	sect_rx->count = cpu_to_le16(1);

	sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
					    size);
	if (!sect_tx)
		goto ice_destroy_tunnel_err;
	sect_tx->count = cpu_to_le16(1);

	/* copy original boost entries to update package buffer, one copy to
	 * the Rx section, another copy to the Tx section
	 */
	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
		    (all || hw->tnl.tbl[i].port == port)) {
			memcpy(sect_rx->tcam + i, hw->tnl.tbl[i].boost_entry,
			       sizeof(*sect_rx->tcam));
			memcpy(sect_tx->tcam + i, hw->tnl.tbl[i].boost_entry,
			       sizeof(*sect_tx->tcam));
			hw->tnl.tbl[i].marked = true;
		}

	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
	if (!status)
		for (i = 0; i < hw->tnl.count &&
		     i < ICE_TUNNEL_MAX_ENTRIES; i++)
			if (hw->tnl.tbl[i].marked) {
				hw->tnl.tbl[i].ref = 0;
				hw->tnl.tbl[i].port = 0;
				hw->tnl.tbl[i].in_use = false;
				hw->tnl.tbl[i].marked = false;
			}

ice_destroy_tunnel_err:
	ice_pkg_buf_free(hw, bld);

ice_destroy_tunnel_end:
	mutex_unlock(&hw->tnl_lock);

	return status;
}

/* PTG Management */

/**
 * ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @ptype: the ptype to search for
 * @ptg: pointer to variable that receives the PTG
 *
 * This function will search the PTGs for a particular ptype, returning the
 * PTG ID that contains it through the PTG parameter, with the value of
 * ICE_DEFAULT_PTG (0) meaning it is part the default PTG.
 */
static enum ice_status
ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
{
	if (ptype >= ICE_XLT1_CNT || !ptg)
		return ICE_ERR_PARAM;

	*ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
	return 0;
}

/**
 * ice_ptg_alloc_val - Allocates a new packet type group ID by value
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @ptg: the PTG to allocate
 *
 * This function allocates a given packet type group ID specified by the PTG
 * parameter.
 */
static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
{
	hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
}

/**
 * ice_ptg_remove_ptype - Removes ptype from a particular packet type group
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @ptype: the ptype to remove
 * @ptg: the PTG to remove the ptype from
 *
 * This function will remove the ptype from the specific PTG, and move it to
 * the default PTG (ICE_DEFAULT_PTG).
 */
static enum ice_status
ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
{
	struct ice_ptg_ptype **ch;
	struct ice_ptg_ptype *p;

	if (ptype > ICE_XLT1_CNT - 1)
		return ICE_ERR_PARAM;

	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
		return ICE_ERR_DOES_NOT_EXIST;

	/* Should not happen if .in_use is set, bad config */
	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
		return ICE_ERR_CFG;

	/* find the ptype within this PTG, and bypass (remove) it */
	p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
	ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
	while (p) {
		if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
			*ch = p->next_ptype;
			break;
		}

		ch = &p->next_ptype;
		p = p->next_ptype;
	}

	hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
	hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;

	return 0;
}

/**
 * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @ptype: the ptype to add or move
 * @ptg: the PTG to add or move the ptype to
 *
 * This function will either add or move a ptype to a particular PTG depending
 * on if the ptype is already part of another group. Note that using a
 * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
 * default PTG.
 */
static enum ice_status
ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
{
	enum ice_status status;
	u8 original_ptg;

	if (ptype > ICE_XLT1_CNT - 1)
		return ICE_ERR_PARAM;

	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
		return ICE_ERR_DOES_NOT_EXIST;

	status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
	if (status)
		return status;

	/* Is ptype already in the correct PTG? */
	if (original_ptg == ptg)
		return 0;

	/* Remove from original PTG and move back to the default PTG */
	if (original_ptg != ICE_DEFAULT_PTG)
		ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);

	/* Moving to default PTG? Then we're done with this request */
	if (ptg == ICE_DEFAULT_PTG)
		return 0;

	/* Add ptype to PTG at beginning of list */
	hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
		hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
	hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
		&hw->blk[blk].xlt1.ptypes[ptype];

	hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
	hw->blk[blk].xlt1.t[ptype] = ptg;

	return 0;
}

/* Block / table size info */
struct ice_blk_size_details {
	u16 xlt1;		/* # XLT1 entries */
	u16 xlt2;		/* # XLT2 entries */
	u16 prof_tcam;		/* # profile ID TCAM entries */
	u16 prof_id;		/* # profile IDs */
	u8 prof_cdid_bits;	/* # CDID bits used in the key */
	u16 prof_redir;		/* # profile redirection entries */
	u16 es;			/* # extraction sequence entries */
	u16 fvw;		/* # field vector words */
	u8 overwrite;		/* overwrite existing entries allowed */
	u8 reverse;		/* reverse FV order */
};

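/* Per-block table sizes, in the field order of struct ice_blk_size_details
 * above: XLT1, XLT2, profile TCAM, profile IDs, CDID key bits, profile
 * redirection, extraction sequence entries, field vector words, overwrite,
 * reverse.
 */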
static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
	/* SWITCH */
	{ ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256, 0, 256, 256, 48,
	  false, false },
	/* ACL */
	{ ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 32,
	  false, false },
	/* FD */
	{ ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
	  false, true },
	/* RSS */
	{ ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
	  true, true },
	/* PE */
	{ ICE_XLT1_CNT, ICE_XLT2_CNT, 64, 32, 0, 32, 32, 24,
	  false, false },
};

enum ice_sid_all {
	ICE_SID_XLT1_OFF = 0,
	ICE_SID_XLT2_OFF,
	ICE_SID_PR_OFF,
	ICE_SID_PR_REDIR_OFF,
	ICE_SID_ES_OFF,
	ICE_SID_OFF_COUNT,
};

/* VSIG Management */

/**
 * ice_match_prop_lst - determine if properties of two lists match
 * @list1: first properties list
 * @list2: second properties list
 *
 * Count, cookies and the order must match in order to be considered
 * equivalent.
 */
static bool
ice_match_prop_lst(struct list_head *list1, struct list_head *list2)
{
	struct ice_vsig_prof *tmp1;
	struct ice_vsig_prof *tmp2;
	u16 chk_count = 0;
	u16 count = 0;

	/* compare counts */
	list_for_each_entry(tmp1, list1, list)
		count++;
	list_for_each_entry(tmp2, list2, list)
		chk_count++;
	if (!count || count != chk_count)
		return false;

	tmp1 = list_first_entry(list1, struct ice_vsig_prof, list);
	tmp2 = list_first_entry(list2, struct ice_vsig_prof, list);

	/* profile cookies must compare, and in the exact same order to take
	 * into account priority
	 */
	while (count--) {
		if (tmp2->profile_cookie != tmp1->profile_cookie)
			return false;

		tmp1 = list_next_entry(tmp1, list);
		tmp2 = list_next_entry(tmp2, list);
	}

	return true;
}

/**
 * ice_vsig_find_vsi - find a VSIG that contains a specified VSI
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @vsi: VSI of interest
 * @vsig: pointer to receive the VSI group
 *
 * This function will lookup the VSI entry in the XLT2 list and return
 * the VSI group it is associated with.
 */
static enum ice_status
ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
{
	if (!vsig || vsi >= ICE_MAX_VSI)
		return ICE_ERR_PARAM;

	/* As long as there's a default or valid VSIG associated with the input
	 * VSI, the function returns success. Any handling of VSIG will be done
	 * by the following add, update or remove functions.
	 */
	*vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;

	return 0;
}

/**
 * ice_vsig_alloc_val - allocate a new VSIG by value
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @vsig: the VSIG to allocate
 *
 * This function will allocate a given VSIG specified by the VSIG parameter.
 */
static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
{
	u16 idx = vsig & ICE_VSIG_IDX_M;

	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
		INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
		hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
	}

	return ICE_VSIG_VALUE(idx, hw->pf_id);
}

/**
 * ice_vsig_alloc - allocate the first available VSIG
 * @hw: pointer to the hardware structure
 * @blk: HW block
 *
 * This function will iterate through the VSIG list and mark the first
 * unused entry for the new VSIG entry as used and return that value.
 */
static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
{
	u16 i;

	for (i = 1; i < ICE_MAX_VSIGS; i++)
		if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
			return ice_vsig_alloc_val(hw, blk, i);

	return ICE_DEFAULT_VSIG;
}

/**
 * ice_find_dup_props_vsig - find VSI group with a specified set of properties
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @chs: characteristic list
 * @vsig: returns the VSIG with the matching profiles, if found
 *
 * Each VSIG is associated with a characteristic set; i.e. all VSIs under
 * a group have the same characteristic set. This function iterates through
 * the XLT2 list and returns the VSIG that has a matching configuration. In
 * order to make sure that priorities are accounted for, the list must match
 * exactly, including the order in which the characteristics are listed.
 */
static enum ice_status
ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
			struct list_head *chs, u16 *vsig)
{
	struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
	u16 i;

	for (i = 0; i < xlt2->count; i++)
		if (xlt2->vsig_tbl[i].in_use &&
		    ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
			*vsig = ICE_VSIG_VALUE(i, hw->pf_id);
			return 0;
		}

	return ICE_ERR_DOES_NOT_EXIST;
}

/**
 * ice_vsig_free - free VSI group
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @vsig: VSIG to remove
 *
 * The function will remove all VSIs associated with the input VSIG and move
 * them to the DEFAULT_VSIG and mark the VSIG available.
 */
static enum ice_status
ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
{
	struct ice_vsig_prof *dtmp, *del;
	struct ice_vsig_vsi *vsi_cur;
	u16 idx;

	idx = vsig & ICE_VSIG_IDX_M;
	if (idx >= ICE_MAX_VSIGS)
		return ICE_ERR_PARAM;

	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
		return ICE_ERR_DOES_NOT_EXIST;

	hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;

	vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
	/* If the VSIG has at least 1 VSI then iterate through the list
	 * and remove the VSIs before deleting the group.
	 */
	if (vsi_cur) {
		/* remove all VSIs associated with this VSIG XLT2 entry */
		do {
			struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;

			vsi_cur->vsig = ICE_DEFAULT_VSIG;
			vsi_cur->changed = 1;
			vsi_cur->next_vsi = NULL;
			vsi_cur = tmp;
		} while (vsi_cur);

		/* NULL terminate head of VSI list */
		hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
	}

	/* free characteristic list */
	list_for_each_entry_safe(del, dtmp,
				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
				 list) {
		list_del(&del->list);
		devm_kfree(ice_hw_to_dev(hw), del);
	}

	/* if VSIG characteristic list was cleared for reset
	 * re-initialize the list head
	 */
	INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);

	return 0;
}

/**
 * ice_vsig_remove_vsi - remove VSI from VSIG
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @vsi: VSI to remove
 * @vsig: VSI group to remove from
 *
 * The function will remove the input VSI from its VSI group and move it
 * to the DEFAULT_VSIG.
 */
static enum ice_status
ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
{
	struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
	u16 idx;

	idx = vsig & ICE_VSIG_IDX_M;

	if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
		return ICE_ERR_PARAM;

	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
		return ICE_ERR_DOES_NOT_EXIST;

	/* entry already in default VSIG, don't have to remove */
	if (idx == ICE_DEFAULT_VSIG)
		return 0;

	vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
	if (!(*vsi_head))
		return ICE_ERR_CFG;

	vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
	vsi_cur = (*vsi_head);

	/* iterate the VSI list, skip over the entry to be removed */
	while (vsi_cur) {
		if (vsi_tgt == vsi_cur) {
			(*vsi_head) = vsi_cur->next_vsi;
			break;
		}
		vsi_head = &vsi_cur->next_vsi;
		vsi_cur = vsi_cur->next_vsi;
	}

	/* verify if VSI was removed from group list */
	if (!vsi_cur)
		return ICE_ERR_DOES_NOT_EXIST;

	vsi_cur->vsig = ICE_DEFAULT_VSIG;
	vsi_cur->changed = 1;
	vsi_cur->next_vsi = NULL;

	return 0;
}

/**
 * ice_vsig_add_mv_vsi - add/move VSI to a VSI group
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @vsi: VSI to move
 * @vsig: destination VSI group
 *
 * This function will move or add the input VSI to the target VSIG.
 * The function will find the original VSIG the VSI belongs to and
 * move the entry to the DEFAULT_VSIG, update the original VSIG and
 * then move entry to the new VSIG.
 */
static enum ice_status
ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
{
	struct ice_vsig_vsi *tmp;
	enum ice_status status;
	u16 orig_vsig, idx;

	idx = vsig & ICE_VSIG_IDX_M;

	if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
		return ICE_ERR_PARAM;

	/* if VSIG not in use and VSIG is not default type this VSIG
	 * doesn't exist.
	 */
	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
	    vsig != ICE_DEFAULT_VSIG)
		return ICE_ERR_DOES_NOT_EXIST;

	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
	if (status)
		return status;

	/* no update required if VSIGs match */
	if (orig_vsig == vsig)
		return 0;

	if (orig_vsig != ICE_DEFAULT_VSIG) {
		/* remove entry from orig_vsig and add to default VSIG */
		status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
		if (status)
			return status;
	}

	if (idx == ICE_DEFAULT_VSIG)
		return 0;

	/* Create VSI entry and add VSIG and prop_mask values */
	hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
	hw->blk[blk].xlt2.vsis[vsi].changed = 1;

	/* Add new entry to the head of the VSIG list */
	tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
	hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
		&hw->blk[blk].xlt2.vsis[vsi];
	hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
	hw->blk[blk].xlt2.t[vsi] = vsig;

	return 0;
}

/**
 * ice_find_prof_id - find profile ID for a given field vector
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @fv: field vector to search for
 * @prof_id: receives the profile ID
 */
static enum ice_status
ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
		 struct ice_fv_word *fv, u8 *prof_id)
{
	struct ice_es *es = &hw->blk[blk].es;
	u16 off;
	u8 i;

	/* FD profiles are never re-used; always report not found so that a
	 * new profile gets allocated.
	 */
	if (blk == ICE_BLK_FD)
		return ICE_ERR_DOES_NOT_EXIST;

	for (i = 0; i < (u8)es->count; i++) {
		off = i * es->fvw;

		if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
			continue;

		*prof_id = i;
		return 0;
	}

	return ICE_ERR_DOES_NOT_EXIST;
}

/**
 * ice_prof_id_rsrc_type - get profile ID resource type for a block type
 * @blk: the block type
 * @rsrc_type: pointer to variable to receive the resource type
 */
static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
{
	switch (blk) {
	case ICE_BLK_FD:
		*rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID;
		break;
	case ICE_BLK_RSS:
		*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
		break;
	default:
		return false;
	}
	return true;
}

/**
 * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type
 * @blk: the block type
 * @rsrc_type: pointer to variable to receive the resource type
 */
static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
{
	switch (blk) {
	case ICE_BLK_FD:
		*rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM;
		break;
	case ICE_BLK_RSS:
		*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
		break;
	default:
		return false;
	}
	return true;
}

/**
 * ice_alloc_tcam_ent - allocate hardware TCAM entry
 * @hw: pointer to the HW struct
 * @blk: the block to allocate the TCAM for
 * @tcam_idx: pointer to variable to receive the TCAM entry
 *
 * This function allocates a new entry in a Profile ID TCAM for a specific
 * block.
 */
static enum ice_status
ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 *tcam_idx)
{
	u16 res_type;

	if (!ice_tcam_ent_rsrc_type(blk, &res_type))
		return ICE_ERR_PARAM;

	return ice_alloc_hw_res(hw, res_type, 1, true, tcam_idx);
}

/**
 * ice_free_tcam_ent - free hardware TCAM entry
 * @hw: pointer to the HW struct
 * @blk: the block from which to free the TCAM entry
 * @tcam_idx: the TCAM entry to free
 *
 * This function frees an entry in a Profile ID TCAM for a specific block.
 */
static enum ice_status
ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
{
	u16 res_type;

	if (!ice_tcam_ent_rsrc_type(blk, &res_type))
		return ICE_ERR_PARAM;

	return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
}

/**
 * ice_alloc_prof_id - allocate profile ID
 * @hw: pointer to the HW struct
 * @blk: the block to allocate the profile ID for
 * @prof_id: pointer to variable to receive the profile ID
 *
 * This function allocates a new profile ID, which also corresponds to a
 * Field Vector (Extraction Sequence) entry.
 */
static enum ice_status
ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
{
	enum ice_status status;
	u16 res_type;
	u16 get_prof;

	if (!ice_prof_id_rsrc_type(blk, &res_type))
		return ICE_ERR_PARAM;

	status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
	if (!status)
		*prof_id = (u8)get_prof;

	return status;
}

/**
 * ice_free_prof_id - free profile ID
 * @hw: pointer to the HW struct
 * @blk: the block from which to free the profile ID
 * @prof_id: the profile ID to free
 *
 * This function frees a profile ID, which also corresponds to a Field Vector.
 */
static enum ice_status
ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
{
	u16 tmp_prof_id = (u16)prof_id;
	u16 res_type;

	if (!ice_prof_id_rsrc_type(blk, &res_type))
		return ICE_ERR_PARAM;

	return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
}

/**
 * ice_prof_inc_ref - increment reference count for profile
 * @hw: pointer to the HW struct
 * @blk: the block the profile belongs to
 * @prof_id: the profile ID for which to increment the reference count
 */
static enum ice_status
ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
{
	if (prof_id > hw->blk[blk].es.count)
		return ICE_ERR_PARAM;

	hw->blk[blk].es.ref_count[prof_id]++;

	return 0;
}

/**
 * ice_write_es - write an extraction sequence to hardware
 * @hw: pointer to the HW struct
 * @blk: the block in which to write the extraction sequence
 * @prof_id: the profile ID to write
 * @fv: pointer to the extraction sequence to write, or NULL to clear it
 */
static void
ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
	     struct ice_fv_word *fv)
{
	u16 off;

	off = prof_id * hw->blk[blk].es.fvw;
	if (!fv) {
		memset(&hw->blk[blk].es.t[off], 0,
		       hw->blk[blk].es.fvw * sizeof(*fv));
		hw->blk[blk].es.written[prof_id] = false;
	} else {
		memcpy(&hw->blk[blk].es.t[off], fv,
		       hw->blk[blk].es.fvw * sizeof(*fv));
	}
}

/**
 * ice_prof_dec_ref - decrement reference count for profile
 * @hw: pointer to the HW struct
 * @blk: the block the profile belongs to
 * @prof_id: the profile ID for which to decrement the reference count
 *
 * When the reference count reaches zero, the extraction sequence is cleared
 * and the profile ID is freed.
 */
static enum ice_status
ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
{
	if (prof_id > hw->blk[blk].es.count)
		return ICE_ERR_PARAM;

	if (hw->blk[blk].es.ref_count[prof_id] > 0) {
		if (!--hw->blk[blk].es.ref_count[prof_id]) {
			ice_write_es(hw, blk, prof_id, NULL);
			return ice_free_prof_id(hw, blk, prof_id);
		}
	}

	return 0;
}

/* Block / table section IDs */
static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
	/* SWITCH */
	{	ICE_SID_XLT1_SW,
		ICE_SID_XLT2_SW,
		ICE_SID_PROFID_TCAM_SW,
		ICE_SID_PROFID_REDIR_SW,
		ICE_SID_FLD_VEC_SW
	},

	/* ACL */
	{	ICE_SID_XLT1_ACL,
		ICE_SID_XLT2_ACL,
		ICE_SID_PROFID_TCAM_ACL,
		ICE_SID_PROFID_REDIR_ACL,
		ICE_SID_FLD_VEC_ACL
	},

	/* FD */
	{	ICE_SID_XLT1_FD,
		ICE_SID_XLT2_FD,
		ICE_SID_PROFID_TCAM_FD,
		ICE_SID_PROFID_REDIR_FD,
		ICE_SID_FLD_VEC_FD
	},

	/* RSS */
	{	ICE_SID_XLT1_RSS,
		ICE_SID_XLT2_RSS,
		ICE_SID_PROFID_TCAM_RSS,
		ICE_SID_PROFID_REDIR_RSS,
		ICE_SID_FLD_VEC_RSS
	},

	/* PE */
	{	ICE_SID_XLT1_PE,
		ICE_SID_XLT2_PE,
		ICE_SID_PROFID_TCAM_PE,
		ICE_SID_PROFID_REDIR_PE,
		ICE_SID_FLD_VEC_PE
	}
};

/**
 * ice_init_sw_xlt1_db - init software XLT1 database from HW tables
 * @hw: pointer to the hardware structure
 * @blk: the HW block to initialize
 */
static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
{
	u16 pt;

	for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
		u8 ptg;

		ptg = hw->blk[blk].xlt1.t[pt];
		if (ptg != ICE_DEFAULT_PTG) {
			ice_ptg_alloc_val(hw, blk, ptg);
			ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
		}
	}
}

/**
 * ice_init_sw_xlt2_db - init software XLT2 database from HW tables
 * @hw: pointer to the hardware structure
 * @blk: the HW block to initialize
 */
static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
{
	u16 vsi;

	for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
		u16 vsig;

		vsig = hw->blk[blk].xlt2.t[vsi];
		if (vsig) {
			ice_vsig_alloc_val(hw, blk, vsig);
			ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
			/* no changes at this time, since this has been
			 * initialized from the original package
			 */
			hw->blk[blk].xlt2.vsis[vsi].changed = 0;
		}
	}
}

/**
 * ice_init_sw_db - init software database from HW tables
 * @hw: pointer to the hardware structure
 */
static void ice_init_sw_db(struct ice_hw *hw)
{
	u16 i;

	for (i = 0; i < ICE_BLK_COUNT; i++) {
		ice_init_sw_xlt1_db(hw, (enum ice_block)i);
		ice_init_sw_xlt2_db(hw, (enum ice_block)i);
	}
}

/**
 * ice_fill_tbl - Reads content of a single table type into database
 * @hw: pointer to the hardware structure
 * @block_id: Block ID of the table to copy
 * @sid: Section ID of the table to copy
 *
 * Will attempt to read the entire content of a given table of a single block
 * into the driver database. We assume that the buffer will always be as
 * large or larger than the data contained in the package. If this condition
 * is not met, there is most likely an error in the package contents.
 */
static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
{
	u32 dst_len, sect_len, offset = 0;
	struct ice_prof_redir_section *pr;
	struct ice_prof_id_section *pid;
	struct ice_xlt1_section *xlt1;
	struct ice_xlt2_section *xlt2;
	struct ice_sw_fv_section *es;
	struct ice_pkg_enum state;
	u8 *src, *dst;
	void *sect;

	/* if the HW segment pointer is null then the first iteration of
	 * ice_pkg_enum_section() will fail. In this case the HW tables will
	 * not be filled and return success.
	 */
	if (!hw->seg) {
		ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n");
		return;
	}

	memset(&state, 0, sizeof(state));

	sect = ice_pkg_enum_section(hw->seg, &state, sid);

	while (sect) {
		switch (sid) {
		case ICE_SID_XLT1_SW:
		case ICE_SID_XLT1_FD:
		case ICE_SID_XLT1_RSS:
		case ICE_SID_XLT1_ACL:
		case ICE_SID_XLT1_PE:
			xlt1 = (struct ice_xlt1_section *)sect;
			src = xlt1->value;
			sect_len = le16_to_cpu(xlt1->count) *
				sizeof(*hw->blk[block_id].xlt1.t);
			dst = hw->blk[block_id].xlt1.t;
			dst_len = hw->blk[block_id].xlt1.count *
				sizeof(*hw->blk[block_id].xlt1.t);
			break;
		case ICE_SID_XLT2_SW:
		case ICE_SID_XLT2_FD:
		case ICE_SID_XLT2_RSS:
		case ICE_SID_XLT2_ACL:
		case ICE_SID_XLT2_PE:
			xlt2 = (struct ice_xlt2_section *)sect;
			src = (__force u8 *)xlt2->value;
			sect_len = le16_to_cpu(xlt2->count) *
				sizeof(*hw->blk[block_id].xlt2.t);
			dst = (u8 *)hw->blk[block_id].xlt2.t;
			dst_len = hw->blk[block_id].xlt2.count *
				sizeof(*hw->blk[block_id].xlt2.t);
			break;
		case ICE_SID_PROFID_TCAM_SW:
		case ICE_SID_PROFID_TCAM_FD:
		case ICE_SID_PROFID_TCAM_RSS:
		case ICE_SID_PROFID_TCAM_ACL:
		case ICE_SID_PROFID_TCAM_PE:
			pid = (struct ice_prof_id_section *)sect;
			src = (u8 *)pid->entry;
			sect_len = le16_to_cpu(pid->count) *
				sizeof(*hw->blk[block_id].prof.t);
			dst = (u8 *)hw->blk[block_id].prof.t;
			dst_len = hw->blk[block_id].prof.count *
				sizeof(*hw->blk[block_id].prof.t);
			break;
		case ICE_SID_PROFID_REDIR_SW:
		case ICE_SID_PROFID_REDIR_FD:
		case ICE_SID_PROFID_REDIR_RSS:
		case ICE_SID_PROFID_REDIR_ACL:
		case ICE_SID_PROFID_REDIR_PE:
			pr = (struct ice_prof_redir_section *)sect;
			src = pr->redir_value;
			sect_len = le16_to_cpu(pr->count) *
				sizeof(*hw->blk[block_id].prof_redir.t);
			dst = hw->blk[block_id].prof_redir.t;
			dst_len = hw->blk[block_id].prof_redir.count *
				sizeof(*hw->blk[block_id].prof_redir.t);
			break;
		case ICE_SID_FLD_VEC_SW:
		case ICE_SID_FLD_VEC_FD:
		case ICE_SID_FLD_VEC_RSS:
		case ICE_SID_FLD_VEC_ACL:
		case ICE_SID_FLD_VEC_PE:
			es = (struct ice_sw_fv_section *)sect;
			src = (u8 *)es->fv;
			sect_len = (u32)(le16_to_cpu(es->count) *
					 hw->blk[block_id].es.fvw) *
				sizeof(*hw->blk[block_id].es.t);
			dst = (u8 *)hw->blk[block_id].es.t;
			dst_len = (u32)(hw->blk[block_id].es.count *
					hw->blk[block_id].es.fvw) *
				sizeof(*hw->blk[block_id].es.t);
			break;
		default:
			return;
		}

		/* if the section offset exceeds destination length, terminate
		 * table fill
		 */
		if (offset > dst_len)
			return;

		/* if the sum of section size and offset exceed destination
		 * size, then we are out of bounds of the HW table size for
		 * that PF. Changing section length to fill the remaining
		 * table space of that PF.
		 */
		if ((offset + sect_len) > dst_len)
			sect_len = dst_len - offset;

		memcpy(dst + offset, src, sect_len);
		offset += sect_len;
		sect = ice_pkg_enum_section(NULL, &state, sid);
	}
}

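/**
 * ice_fill_blk_tbls - Read package content for tables of a block
 * @hw: pointer to the hardware structure
 *
 * Reads the current package contents and populates the driver
 * database with the data iteratively for all advanced feature
 * blocks. Assumes that the HW tables have been allocated.
 */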
2870void ice_fill_blk_tbls(struct ice_hw *hw)
2871{
2872 u8 i;
2873
2874 for (i = 0; i < ICE_BLK_COUNT; i++) {
2875 enum ice_block blk_id = (enum ice_block)i;
2876
2877 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
2878 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
2879 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
2880 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
2881 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
2882 }
2883
2884 ice_init_sw_db(hw);
2885}
2886
2887
2888
2889
2890
2891
2892static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx)
2893{
2894 struct ice_es *es = &hw->blk[blk_idx].es;
2895 struct ice_prof_map *del, *tmp;
2896
2897 mutex_lock(&es->prof_map_lock);
2898 list_for_each_entry_safe(del, tmp, &es->prof_map, list) {
2899 list_del(&del->list);
2900 devm_kfree(ice_hw_to_dev(hw), del);
2901 }
2902 INIT_LIST_HEAD(&es->prof_map);
2903 mutex_unlock(&es->prof_map_lock);
2904}
2905
2906
2907
2908
2909
2910
static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
{
	struct ice_flow_prof *p, *tmp;

	mutex_lock(&hw->fl_profs_locks[blk_idx]);
	list_for_each_entry_safe(p, tmp, &hw->fl_profs[blk_idx], l_entry) {
		struct ice_flow_entry *e, *t;

		list_for_each_entry_safe(e, t, &p->entries, l_entry)
			ice_flow_rem_entry(hw, (enum ice_block)blk_idx,
					   ICE_FLOW_ENTRY_HNDL(e));

		list_del(&p->l_entry);

		mutex_destroy(&p->entries_lock);
		devm_kfree(ice_hw_to_dev(hw), p);
	}
	mutex_unlock(&hw->fl_profs_locks[blk_idx]);

	/* if driver is in reset and tables are being cleared,
	 * re-initialize the flow profile list heads
	 */
	INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
}
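
/**
 * ice_free_vsig_tbl - free complete VSIG table entries
 * @hw: pointer to the hardware structure
 * @blk: the HW block on which to free the VSIG table entries
 */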
static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
{
	u16 i;

	if (!hw->blk[blk].xlt2.vsig_tbl)
		return;

	for (i = 1; i < ICE_MAX_VSIGS; i++)
		if (hw->blk[blk].xlt2.vsig_tbl[i].in_use)
			ice_vsig_free(hw, blk, i);
}
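
/**
 * ice_free_hw_tbls - free hardware table memory
 * @hw: pointer to the hardware structure
 */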
void ice_free_hw_tbls(struct ice_hw *hw)
{
	struct ice_rss_cfg *r, *rt;
	u8 i;

	for (i = 0; i < ICE_BLK_COUNT; i++) {
		if (hw->blk[i].is_list_init) {
			struct ice_es *es = &hw->blk[i].es;

			ice_free_prof_map(hw, i);
			mutex_destroy(&es->prof_map_lock);

			ice_free_flow_profs(hw, i);
			mutex_destroy(&hw->fl_profs_locks[i]);

			hw->blk[i].is_list_init = false;
		}
		ice_free_vsig_tbl(hw, (enum ice_block)i);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptypes);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptg_tbl);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.t);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.t);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsig_tbl);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsis);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof.t);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
	}

	list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) {
		list_del(&r->l_entry);
		devm_kfree(ice_hw_to_dev(hw), r);
	}
	mutex_destroy(&hw->rss_locks);
	memset(hw->blk, 0, sizeof(hw->blk));
}
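
/**
 * ice_init_flow_profs - init flow profile locks and list heads
 * @hw: pointer to the hardware structure
 * @blk_idx: HW block index
 */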
static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
{
	mutex_init(&hw->fl_profs_locks[blk_idx]);
	INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
}
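
/**
 * ice_clear_hw_tbls - clear HW tables and flow profiles
 * @hw: pointer to the hardware structure
 */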
void ice_clear_hw_tbls(struct ice_hw *hw)
{
	u8 i;

	for (i = 0; i < ICE_BLK_COUNT; i++) {
		struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
		struct ice_prof_tcam *prof = &hw->blk[i].prof;
		struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
		struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
		struct ice_es *es = &hw->blk[i].es;

		if (hw->blk[i].is_list_init) {
			ice_free_prof_map(hw, i);
			ice_free_flow_profs(hw, i);
		}

		ice_free_vsig_tbl(hw, (enum ice_block)i);

		memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes));
		memset(xlt1->ptg_tbl, 0,
		       ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl));
		memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t));

		memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis));
		memset(xlt2->vsig_tbl, 0,
		       xlt2->count * sizeof(*xlt2->vsig_tbl));
		memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t));

		memset(prof->t, 0, prof->count * sizeof(*prof->t));
		memset(prof_redir->t, 0,
		       prof_redir->count * sizeof(*prof_redir->t));

		memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw);
		memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
		memset(es->written, 0, es->count * sizeof(*es->written));
	}
}
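
/**
 * ice_init_hw_tbls - init hardware table memory
 * @hw: pointer to the hardware structure
 */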
enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
{
	u8 i;

	mutex_init(&hw->rss_locks);
	INIT_LIST_HEAD(&hw->rss_list_head);
	for (i = 0; i < ICE_BLK_COUNT; i++) {
		struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
		struct ice_prof_tcam *prof = &hw->blk[i].prof;
		struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
		struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
		struct ice_es *es = &hw->blk[i].es;
		u16 j;

		if (hw->blk[i].is_list_init)
			continue;

		ice_init_flow_profs(hw, i);
		mutex_init(&es->prof_map_lock);
		INIT_LIST_HEAD(&es->prof_map);
		hw->blk[i].is_list_init = true;

		hw->blk[i].overwrite = blk_sizes[i].overwrite;
		es->reverse = blk_sizes[i].reverse;

		xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
		xlt1->count = blk_sizes[i].xlt1;

		xlt1->ptypes = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
					    sizeof(*xlt1->ptypes), GFP_KERNEL);
		if (!xlt1->ptypes)
			goto err;

		xlt1->ptg_tbl = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_PTGS,
					     sizeof(*xlt1->ptg_tbl),
					     GFP_KERNEL);
		if (!xlt1->ptg_tbl)
			goto err;

		xlt1->t = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
				       sizeof(*xlt1->t), GFP_KERNEL);
		if (!xlt1->t)
			goto err;

		xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
		xlt2->count = blk_sizes[i].xlt2;

		xlt2->vsis = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
					  sizeof(*xlt2->vsis), GFP_KERNEL);
		if (!xlt2->vsis)
			goto err;

		xlt2->vsig_tbl = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
					      sizeof(*xlt2->vsig_tbl),
					      GFP_KERNEL);
		if (!xlt2->vsig_tbl)
			goto err;

		for (j = 0; j < xlt2->count; j++)
			INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);

		xlt2->t = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
				       sizeof(*xlt2->t), GFP_KERNEL);
		if (!xlt2->t)
			goto err;

		prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
		prof->count = blk_sizes[i].prof_tcam;
		prof->max_prof_id = blk_sizes[i].prof_id;
		prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
		prof->t = devm_kcalloc(ice_hw_to_dev(hw), prof->count,
				       sizeof(*prof->t), GFP_KERNEL);
		if (!prof->t)
			goto err;

		prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
		prof_redir->count = blk_sizes[i].prof_redir;
		prof_redir->t = devm_kcalloc(ice_hw_to_dev(hw),
					     prof_redir->count,
					     sizeof(*prof_redir->t),
					     GFP_KERNEL);
		if (!prof_redir->t)
			goto err;

		es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
		es->count = blk_sizes[i].es;
		es->fvw = blk_sizes[i].fvw;
		es->t = devm_kcalloc(ice_hw_to_dev(hw),
				     (u32)(es->count * es->fvw),
				     sizeof(*es->t), GFP_KERNEL);
		if (!es->t)
			goto err;

		es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count,
					     sizeof(*es->ref_count),
					     GFP_KERNEL);
		if (!es->ref_count)
			goto err;

		es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count,
					   sizeof(*es->written), GFP_KERNEL);
		if (!es->written)
			goto err;
	}
	return 0;

err:
	ice_free_hw_tbls(hw);
	return ICE_ERR_NO_MEMORY;
}
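
/**
 * ice_prof_gen_key - generate profile ID key
 * @hw: pointer to the HW struct
 * @blk: the block in which to write profile ID to
 * @ptg: packet type group (PTG) portion of key
 * @vsig: VSIG portion of key
 * @cdid: CDID portion of key
 * @flags: flag portion of key
 * @vl_msk: valid mask
 * @dc_msk: don't care mask
 * @nm_msk: never match mask
 * @key: output of profile ID key
 */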
static enum ice_status
ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
		 u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
		 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
		 u8 key[ICE_TCAM_KEY_SZ])
{
	struct ice_prof_id_key inkey;

	inkey.xlt1 = ptg;
	inkey.xlt2_cdid = cpu_to_le16(vsig);
	inkey.flags = cpu_to_le16(flags);

	switch (hw->blk[blk].prof.cdid_bits) {
	case 0:
		break;
	case 2:
#define ICE_CD_2_M 0xC000U
#define ICE_CD_2_S 14
		inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_2_M);
		inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_2_S);
		break;
	case 4:
#define ICE_CD_4_M 0xF000U
#define ICE_CD_4_S 12
		inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_4_M);
		inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_4_S);
		break;
	case 8:
#define ICE_CD_8_M 0xFF00U
#define ICE_CD_8_S 8
		inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_8_M);
		inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_8_S);
		break;
	default:
		ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
		break;
	}

	return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
			   nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
}
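
/**
 * ice_tcam_write_entry - write TCAM entry
 * @hw: pointer to the HW struct
 * @blk: the block in which to write profile ID to
 * @idx: the entry index to write to
 * @prof_id: profile ID
 * @ptg: packet type group (PTG) portion of key
 * @vsig: VSIG portion of key
 * @cdid: CDID portion of key
 * @flags: flag portion of key
 * @vl_msk: valid mask
 * @dc_msk: don't care mask
 * @nm_msk: never match mask
 */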
static enum ice_status
ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
		     u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
		     u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
		     u8 dc_msk[ICE_TCAM_KEY_VAL_SZ],
		     u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
{
	enum ice_status status;

	status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
				  dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
	if (!status) {
		hw->blk[blk].prof.t[idx].addr = cpu_to_le16(idx);
		hw->blk[blk].prof.t[idx].prof_id = prof_id;
	}

	return status;
}
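
/**
 * ice_vsig_get_ref - returns number of VSIs that belong to a VSIG
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @vsig: VSIG to query
 * @refs: pointer to variable to receive the reference count
 */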
static enum ice_status
ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
{
	u16 idx = vsig & ICE_VSIG_IDX_M;
	struct ice_vsig_vsi *ptr;

	*refs = 0;

	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
		return ICE_ERR_DOES_NOT_EXIST;

	ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
	while (ptr) {
		(*refs)++;
		ptr = ptr->next_vsi;
	}

	return 0;
}
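
/**
 * ice_has_prof_vsig - check to see if VSIG has a specific profile
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @vsig: VSIG to check against
 * @hdl: profile handle
 */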
static bool
ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
{
	u16 idx = vsig & ICE_VSIG_IDX_M;
	struct ice_vsig_prof *ent;

	list_for_each_entry(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
			    list)
		if (ent->profile_cookie == hdl)
			return true;

	ice_debug(hw, ICE_DBG_INIT,
		  "Characteristic list for VSI group %d not found.\n",
		  vsig);
	return false;
}
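
/**
 * ice_prof_bld_es - build profile ID extraction sequence changes
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @bld: the update package buffer build to add to
 * @chgs: the list of changes to make in hardware
 */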
static enum ice_status
ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
		struct ice_buf_build *bld, struct list_head *chgs)
{
	u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
	struct ice_chs_chg *tmp;

	list_for_each_entry(tmp, chgs, list_entry)
		if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
			u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
			struct ice_pkg_es *p;
			u32 id;

			id = ice_sect_id(blk, ICE_VEC_TBL);
			p = ice_pkg_buf_alloc_section(bld, id,
						      struct_size(p, es, 1) +
						      vec_size -
						      sizeof(p->es[0]));
			if (!p)
				return ICE_ERR_MAX_LIMIT;

			p->count = cpu_to_le16(1);
			p->offset = cpu_to_le16(tmp->prof_id);

			memcpy(p->es, &hw->blk[blk].es.t[off], vec_size);
		}

	return 0;
}
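
/**
 * ice_prof_bld_tcam - build profile ID TCAM changes
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @bld: the update package buffer build to add to
 * @chgs: the list of changes to make in hardware
 */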
static enum ice_status
ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
		  struct ice_buf_build *bld, struct list_head *chgs)
{
	struct ice_chs_chg *tmp;

	list_for_each_entry(tmp, chgs, list_entry)
		if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) {
			struct ice_prof_id_section *p;
			u32 id;

			id = ice_sect_id(blk, ICE_PROF_TCAM);
			p = ice_pkg_buf_alloc_section(bld, id,
						      struct_size(p, entry, 1));
			if (!p)
				return ICE_ERR_MAX_LIMIT;

			p->count = cpu_to_le16(1);
			p->entry[0].addr = cpu_to_le16(tmp->tcam_idx);
			p->entry[0].prof_id = tmp->prof_id;

			memcpy(p->entry[0].key,
			       &hw->blk[blk].prof.t[tmp->tcam_idx].key,
			       sizeof(hw->blk[blk].prof.t->key));
		}

	return 0;
}
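
/**
 * ice_prof_bld_xlt1 - build XLT1 changes
 * @blk: hardware block
 * @bld: the update package buffer build to add to
 * @chgs: the list of changes to make in hardware
 */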
static enum ice_status
ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
		  struct list_head *chgs)
{
	struct ice_chs_chg *tmp;

	list_for_each_entry(tmp, chgs, list_entry)
		if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
			struct ice_xlt1_section *p;
			u32 id;

			id = ice_sect_id(blk, ICE_XLT1);
			p = ice_pkg_buf_alloc_section(bld, id,
						      struct_size(p, value, 1));
			if (!p)
				return ICE_ERR_MAX_LIMIT;

			p->count = cpu_to_le16(1);
			p->offset = cpu_to_le16(tmp->ptype);
			p->value[0] = tmp->ptg;
		}

	return 0;
}
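
/**
 * ice_prof_bld_xlt2 - build XLT2 changes
 * @blk: hardware block
 * @bld: the update package buffer build to add to
 * @chgs: the list of changes to make in hardware
 */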
static enum ice_status
ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
		  struct list_head *chgs)
{
	struct ice_chs_chg *tmp;

	list_for_each_entry(tmp, chgs, list_entry) {
		struct ice_xlt2_section *p;
		u32 id;

		switch (tmp->type) {
		case ICE_VSIG_ADD:
		case ICE_VSI_MOVE:
		case ICE_VSIG_REM:
			id = ice_sect_id(blk, ICE_XLT2);
			p = ice_pkg_buf_alloc_section(bld, id,
						      struct_size(p, value, 1));
			if (!p)
				return ICE_ERR_MAX_LIMIT;

			p->count = cpu_to_le16(1);
			p->offset = cpu_to_le16(tmp->vsi);
			p->value[0] = cpu_to_le16(tmp->vsig);
			break;
		default:
			break;
		}
	}

	return 0;
}
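
/**
 * ice_upd_prof_hw - update hardware using the change list
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @chgs: the list of changes to make in hardware
 */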
static enum ice_status
ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
		struct list_head *chgs)
{
	struct ice_buf_build *b;
	struct ice_chs_chg *tmp;
	enum ice_status status;
	u16 pkg_sects;
	u16 xlt1 = 0;
	u16 xlt2 = 0;
	u16 tcam = 0;
	u16 es = 0;
	u16 sects;

	/* count number of sections we need */
	list_for_each_entry(tmp, chgs, list_entry) {
		switch (tmp->type) {
		case ICE_PTG_ES_ADD:
			if (tmp->add_ptg)
				xlt1++;
			if (tmp->add_prof)
				es++;
			break;
		case ICE_TCAM_ADD:
			tcam++;
			break;
		case ICE_VSIG_ADD:
		case ICE_VSI_MOVE:
		case ICE_VSIG_REM:
			xlt2++;
			break;
		default:
			break;
		}
	}
	sects = xlt1 + xlt2 + tcam + es;

	if (!sects)
		return 0;

	/* Build update package buffer */
	b = ice_pkg_buf_alloc(hw);
	if (!b)
		return ICE_ERR_NO_MEMORY;

	status = ice_pkg_buf_reserve_section(b, sects);
	if (status)
		goto error_tmp;

	/* Preserve order of table update: ES, TCAM, PTG, VSIG */
	if (es) {
		status = ice_prof_bld_es(hw, blk, b, chgs);
		if (status)
			goto error_tmp;
	}

	if (tcam) {
		status = ice_prof_bld_tcam(hw, blk, b, chgs);
		if (status)
			goto error_tmp;
	}

	if (xlt1) {
		status = ice_prof_bld_xlt1(blk, b, chgs);
		if (status)
			goto error_tmp;
	}

	if (xlt2) {
		status = ice_prof_bld_xlt2(blk, b, chgs);
		if (status)
			goto error_tmp;
	}

	/* After package buffer build check if the section count in buffer is
	 * non-zero and matches the number of sections detected for package
	 * update.
	 */
	pkg_sects = ice_pkg_buf_get_active_sections(b);
	if (!pkg_sects || pkg_sects != sects) {
		status = ICE_ERR_INVAL_SIZE;
		goto error_tmp;
	}

	/* update package */
	status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
	if (status == ICE_ERR_AQ_ERROR)
		ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");

error_tmp:
	ice_pkg_buf_free(hw, b);
	return status;
}
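
/**
 * ice_update_fd_mask - set Flow Director Field Vector mask for a profile
 * @hw: pointer to the HW struct
 * @prof_id: profile ID
 * @mask_sel: mask select
 *
 * This function enables any of the masks selected by the mask select
 * parameter for the profile specified.
 */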
static void ice_update_fd_mask(struct ice_hw *hw, u16 prof_id, u32 mask_sel)
{
	wr32(hw, GLQF_FDMASK_SEL(prof_id), mask_sel);

	ice_debug(hw, ICE_DBG_INIT, "fd mask(%d): %x = %x\n", prof_id,
		  GLQF_FDMASK_SEL(prof_id), mask_sel);
}

struct ice_fd_src_dst_pair {
	u8 prot_id;
	u8 count;
	u16 off;
};

static const struct ice_fd_src_dst_pair ice_fd_pairs[] = {
	/* These are defined in pairs */
	{ ICE_PROT_IPV4_OF_OR_S, 2, 12 },
	{ ICE_PROT_IPV4_OF_OR_S, 2, 16 },

	{ ICE_PROT_IPV4_IL, 2, 12 },
	{ ICE_PROT_IPV4_IL, 2, 16 },

	{ ICE_PROT_IPV6_OF_OR_S, 8, 8 },
	{ ICE_PROT_IPV6_OF_OR_S, 8, 24 },

	{ ICE_PROT_IPV6_IL, 8, 8 },
	{ ICE_PROT_IPV6_IL, 8, 24 },

	{ ICE_PROT_TCP_IL, 1, 0 },
	{ ICE_PROT_TCP_IL, 1, 2 },

	{ ICE_PROT_UDP_OF, 1, 0 },
	{ ICE_PROT_UDP_OF, 1, 2 },

	{ ICE_PROT_UDP_IL_OR_S, 1, 0 },
	{ ICE_PROT_UDP_IL_OR_S, 1, 2 },

	{ ICE_PROT_SCTP_IL, 1, 0 },
	{ ICE_PROT_SCTP_IL, 1, 2 }
};

#define ICE_FD_SRC_DST_PAIR_COUNT ARRAY_SIZE(ice_fd_pairs)
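
/**
 * ice_update_fd_swap - set register appropriately for a FD FV extraction
 * @hw: pointer to the HW struct
 * @prof_id: profile ID
 * @es: extraction sequence (length of array is determined by the block)
 */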
static enum ice_status
ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
{
	DECLARE_BITMAP(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
	u8 pair_start[ICE_FD_SRC_DST_PAIR_COUNT] = { 0 };
#define ICE_FD_FV_NOT_FOUND (-2)
	s8 first_free = ICE_FD_FV_NOT_FOUND;
	u8 used[ICE_MAX_FV_WORDS] = { 0 };
	s8 orig_free, si;
	u32 mask_sel = 0;
	u8 i, j, k;

	bitmap_zero(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);

	/* This code assumes that the Flow Director field vectors are assigned
	 * from the end of the FV indexes working towards the zero index, that
	 * only complete fields will be included and will be consecutive, and
	 * that there are no gaps between valid indexes.
	 */

	/* Determine swap fields present */
	for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) {
		/* Find the first free entry, assuming right to left fill-in */
		if (first_free == ICE_FD_FV_NOT_FOUND && es[i].prot_id !=
		    ICE_PROT_INVALID)
			first_free = i - 1;

		for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
			if (es[i].prot_id == ice_fd_pairs[j].prot_id &&
			    es[i].off == ice_fd_pairs[j].off) {
				set_bit(j, pair_list);
				pair_start[j] = i;
			}
	}

	orig_free = first_free;

	/* determine missing swap fields that need to be added */
	for (i = 0; i < ICE_FD_SRC_DST_PAIR_COUNT; i += 2) {
		u8 bit1 = test_bit(i + 1, pair_list);
		u8 bit0 = test_bit(i, pair_list);

		if (bit0 ^ bit1) {
			u8 index;

			/* add the appropriate 'paired' entry */
			if (!bit0)
				index = i;
			else
				index = i + 1;

			/* check for room */
			if (first_free + 1 < (s8)ice_fd_pairs[index].count)
				return ICE_ERR_MAX_LIMIT;

			/* place in extraction sequence */
			for (k = 0; k < ice_fd_pairs[index].count; k++) {
				es[first_free - k].prot_id =
					ice_fd_pairs[index].prot_id;
				es[first_free - k].off =
					ice_fd_pairs[index].off + (k * 2);

				if (k > first_free)
					return ICE_ERR_OUT_OF_RANGE;

				/* keep track of non-relevant fields */
				mask_sel |= BIT(first_free - k);
			}

			pair_start[index] = first_free;
			first_free -= ice_fd_pairs[index].count;
		}
	}

	/* fill in the swap array */
	si = hw->blk[ICE_BLK_FD].es.fvw - 1;
	while (si >= 0) {
		u8 indexes_used = 1;

		/* assume flat at this index */
#define ICE_SWAP_VALID 0x80
		used[si] = si | ICE_SWAP_VALID;

		if (orig_free == ICE_FD_FV_NOT_FOUND || si <= orig_free) {
			si -= indexes_used;
			continue;
		}

		/* check for a swap location */
		for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
			if (es[si].prot_id == ice_fd_pairs[j].prot_id &&
			    es[si].off == ice_fd_pairs[j].off) {
				u8 idx;

				/* determine the appropriate matching field */
				idx = j + ((j % 2) ? -1 : 1);

				indexes_used = ice_fd_pairs[idx].count;
				for (k = 0; k < indexes_used; k++) {
					used[si - k] = (pair_start[idx] - k) |
						ICE_SWAP_VALID;
				}

				break;
			}

		si -= indexes_used;
	}

	/* for each set of 4 swap and 4 inset indexes, write the appropriate
	 * register
	 */
	for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) {
		u32 raw_swap = 0;
		u32 raw_in = 0;

		for (k = 0; k < 4; k++) {
			u8 idx;

			idx = (j * 4) + k;
			if (used[idx] && !(mask_sel & BIT(idx))) {
				raw_swap |= used[idx] << (k * BITS_PER_BYTE);
#define ICE_INSET_DFLT 0x9f
				raw_in |= ICE_INSET_DFLT << (k * BITS_PER_BYTE);
			}
		}

		/* write the appropriate swap register set */
		wr32(hw, GLQF_FDSWAP(prof_id, j), raw_swap);

		ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n",
			  prof_id, j, GLQF_FDSWAP(prof_id, j), raw_swap);

		/* write the appropriate inset register set */
		wr32(hw, GLQF_FDINSET(prof_id, j), raw_in);

		ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n",
			  prof_id, j, GLQF_FDINSET(prof_id, j), raw_in);
	}

	/* initially clear the mask select for this profile */
	ice_update_fd_mask(hw, prof_id, 0);

	return 0;
}
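
/**
 * ice_add_prof - add profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @id: profile tracking ID
 * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
 * @es: extraction sequence (length of array is determined by the block)
 *
 * This function registers a profile, which matches a set of PTGs with a
 * particular extraction sequence. While the hardware profile is allocated
 * it will not be written until the first call to ice_add_flow that specifies
 * the ID value used here.
 */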
enum ice_status
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
	     struct ice_fv_word *es)
{
	u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
	DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
	struct ice_prof_map *prof;
	enum ice_status status;
	u8 byte = 0;
	u8 prof_id;

	bitmap_zero(ptgs_used, ICE_XLT1_CNT);

	mutex_lock(&hw->blk[blk].es.prof_map_lock);

	/* search for existing profile */
	status = ice_find_prof_id(hw, blk, es, &prof_id);
	if (status) {
		/* allocate profile ID */
		status = ice_alloc_prof_id(hw, blk, &prof_id);
		if (status)
			goto err_ice_add_prof;
		if (blk == ICE_BLK_FD) {
			/* For Flow Director block, the extraction sequence may
			 * need to be altered in the case where there are paired
			 * fields that have no match. This is necessary because
			 * for Flow Director, src and dest fields need to paired
			 * for filter programming and these values are swapped
			 * during Tx.
			 */
			status = ice_update_fd_swap(hw, prof_id, es);
			if (status)
				goto err_ice_add_prof;
		}

		/* and write new es */
		ice_write_es(hw, blk, prof_id, es);
	}

	ice_prof_inc_ref(hw, blk, prof_id);

	/* add profile info */
	prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL);
	if (!prof) {
		status = ICE_ERR_NO_MEMORY;
		goto err_ice_add_prof;
	}

	prof->profile_cookie = id;
	prof->prof_id = prof_id;
	prof->ptg_cnt = 0;
	prof->context = 0;

	/* build list of ptgs */
	while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
		u8 bit;

		if (!ptypes[byte]) {
			bytes--;
			byte++;
			continue;
		}

		/* Examine 8 bits per byte */
		for_each_set_bit(bit, (unsigned long *)&ptypes[byte],
				 BITS_PER_BYTE) {
			u16 ptype;
			u8 ptg;
			u8 m;

			ptype = byte * BITS_PER_BYTE + bit;

			/* The package should place all ptypes in a non-zero
			 * PTG, so the following call should never fail.
			 */
			if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
				continue;

			/* If PTG is already added, skip and continue */
			if (test_bit(ptg, ptgs_used))
				continue;

			set_bit(ptg, ptgs_used);
			prof->ptg[prof->ptg_cnt] = ptg;

			if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
				break;

			/* nothing left in byte, then exit */
			m = ~(u8)((1 << (bit + 1)) - 1);
			if (!(ptypes[byte] & m))
				break;
		}

		bytes--;
		byte++;
	}

	list_add(&prof->list, &hw->blk[blk].es.prof_map);
	status = 0;

err_ice_add_prof:
	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
	return status;
}
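
/**
 * ice_search_prof_id - Search for a profile tracking ID
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @id: profile tracking ID
 *
 * This will search for a profile tracking ID which was previously added.
 * The profile map lock should be held before calling this function.
 */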
static struct ice_prof_map *
ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
{
	struct ice_prof_map *entry = NULL;
	struct ice_prof_map *map;

	list_for_each_entry(map, &hw->blk[blk].es.prof_map, list)
		if (map->profile_cookie == id) {
			entry = map;
			break;
		}

	return entry;
}
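
/**
 * ice_vsig_prof_id_count - count profiles in a VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: VSIG to query
 */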
static u16
ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
{
	u16 idx = vsig & ICE_VSIG_IDX_M, count = 0;
	struct ice_vsig_prof *p;

	list_for_each_entry(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
			    list)
		count++;

	return count;
}
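
/**
 * ice_rel_tcam_idx - release a TCAM index
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @idx: the index to release
 */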
static enum ice_status
ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
{
	/* Masks to invoke a never match entry */
	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
	enum ice_status status;

	/* write the TCAM entry */
	status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
				      dc_msk, nm_msk);
	if (status)
		return status;

	/* release the TCAM entry */
	status = ice_free_tcam_ent(hw, blk, idx);

	return status;
}
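
/**
 * ice_rem_prof_id - remove one profile from a VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @prof: pointer to profile structure to remove
 */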
static enum ice_status
ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
		struct ice_vsig_prof *prof)
{
	enum ice_status status;
	u16 i;

	for (i = 0; i < prof->tcam_count; i++)
		if (prof->tcam[i].in_use) {
			prof->tcam[i].in_use = false;
			status = ice_rel_tcam_idx(hw, blk,
						  prof->tcam[i].tcam_idx);
			if (status)
				return ICE_ERR_HW_TABLE;
		}

	return 0;
}
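
/**
 * ice_rem_vsig - remove VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: the VSIG to remove
 * @chg: the change list
 */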
static enum ice_status
ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
	     struct list_head *chg)
{
	u16 idx = vsig & ICE_VSIG_IDX_M;
	struct ice_vsig_vsi *vsi_cur;
	struct ice_vsig_prof *d, *t;
	enum ice_status status;

	/* remove TCAM entries */
	list_for_each_entry_safe(d, t,
				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
				 list) {
		status = ice_rem_prof_id(hw, blk, d);
		if (status)
			return status;

		list_del(&d->list);
		devm_kfree(ice_hw_to_dev(hw), d);
	}

	/* Move all VSIS associated with this VSIG to the default VSIG */
	vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
	/* If the VSIG has at least 1 VSI then iterate through the list
	 * and remove the VSIs before deleting the group.
	 */
	if (vsi_cur)
		do {
			struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
			struct ice_chs_chg *p;

			p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
					 GFP_KERNEL);
			if (!p)
				return ICE_ERR_NO_MEMORY;

			p->type = ICE_VSIG_REM;
			p->orig_vsig = vsig;
			p->vsig = ICE_DEFAULT_VSIG;
			p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;

			list_add(&p->list_entry, chg);

			vsi_cur = tmp;
		} while (vsi_cur);

	return ice_vsig_free(hw, blk, vsig);
}
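
/**
 * ice_rem_prof_id_vsig - remove a specific profile from a VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: VSIG to remove the profile from
 * @hdl: profile handle indicating which profile to remove
 * @chg: list to receive a record of changes
 */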
static enum ice_status
ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
		     struct list_head *chg)
{
	u16 idx = vsig & ICE_VSIG_IDX_M;
	struct ice_vsig_prof *p, *t;
	enum ice_status status;

	list_for_each_entry_safe(p, t,
				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
				 list)
		if (p->profile_cookie == hdl) {
			if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
				/* this is the last profile, remove the VSIG */
				return ice_rem_vsig(hw, blk, vsig, chg);

			status = ice_rem_prof_id(hw, blk, p);
			if (!status) {
				list_del(&p->list);
				devm_kfree(ice_hw_to_dev(hw), p);
			}
			return status;
		}

	return ICE_ERR_DOES_NOT_EXIST;
}
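
/**
 * ice_rem_flow_all - remove all flows with a particular profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @id: profile tracking ID
 */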
static enum ice_status
ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
{
	struct ice_chs_chg *del, *tmp;
	enum ice_status status;
	struct list_head chg;
	u16 i;

	INIT_LIST_HEAD(&chg);

	for (i = 1; i < ICE_MAX_VSIGS; i++)
		if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
			if (ice_has_prof_vsig(hw, blk, i, id)) {
				status = ice_rem_prof_id_vsig(hw, blk, i, id,
							      &chg);
				if (status)
					goto err_ice_rem_flow_all;
			}
		}

	status = ice_upd_prof_hw(hw, blk, &chg);

err_ice_rem_flow_all:
	list_for_each_entry_safe(del, tmp, &chg, list_entry) {
		list_del(&del->list_entry);
		devm_kfree(ice_hw_to_dev(hw), del);
	}

	return status;
}
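
/**
 * ice_rem_prof - remove profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @id: profile tracking ID
 *
 * This will remove the profile specified by the ID parameter, which was
 * previously created through ice_add_prof. If any existing entries
 * are associated with this profile, they will be removed as well.
 */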
enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
{
	struct ice_prof_map *pmap;
	enum ice_status status;

	mutex_lock(&hw->blk[blk].es.prof_map_lock);

	pmap = ice_search_prof_id(hw, blk, id);
	if (!pmap) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto err_ice_rem_prof;
	}

	/* remove all flows with this profile */
	status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
	if (status)
		goto err_ice_rem_prof;

	/* dereference profile, and possibly remove */
	ice_prof_dec_ref(hw, blk, pmap->prof_id);

	list_del(&pmap->list);
	devm_kfree(ice_hw_to_dev(hw), pmap);

err_ice_rem_prof:
	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
	return status;
}
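
/**
 * ice_get_prof - get profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @hdl: profile handle
 * @chg: change list
 */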
static enum ice_status
ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
	     struct list_head *chg)
{
	enum ice_status status = 0;
	struct ice_prof_map *map;
	struct ice_chs_chg *p;
	u16 i;

	mutex_lock(&hw->blk[blk].es.prof_map_lock);
	/* Get the details on the profile specified by the handle ID */
	map = ice_search_prof_id(hw, blk, hdl);
	if (!map) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto err_ice_get_prof;
	}

	for (i = 0; i < map->ptg_cnt; i++)
		if (!hw->blk[blk].es.written[map->prof_id]) {
			/* add ES to change list */
			p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
					 GFP_KERNEL);
			if (!p) {
				status = ICE_ERR_NO_MEMORY;
				goto err_ice_get_prof;
			}

			p->type = ICE_PTG_ES_ADD;
			p->ptype = 0;
			p->ptg = map->ptg[i];
			p->add_ptg = 0;

			p->add_prof = 1;
			p->prof_id = map->prof_id;

			hw->blk[blk].es.written[map->prof_id] = true;

			list_add(&p->list_entry, chg);
		}

err_ice_get_prof:
	mutex_unlock(&hw->blk[blk].es.prof_map_lock);

	return status;
}
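
/**
 * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: VSIG from which to copy the list
 * @lst: output list
 *
 * This routine makes a copy of the list of profiles in the specified VSIG.
 */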
static enum ice_status
ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
		   struct list_head *lst)
{
	struct ice_vsig_prof *ent1, *ent2;
	u16 idx = vsig & ICE_VSIG_IDX_M;

	list_for_each_entry(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
			    list) {
		struct ice_vsig_prof *p;

		/* copy augmented profile of the VSIG */
		p = devm_kmemdup(ice_hw_to_dev(hw), ent1, sizeof(*p),
				 GFP_KERNEL);
		if (!p)
			goto err_ice_get_profs_vsig;

		list_add_tail(&p->list, lst);
	}

	return 0;

err_ice_get_profs_vsig:
	list_for_each_entry_safe(ent1, ent2, lst, list) {
		list_del(&ent1->list);
		devm_kfree(ice_hw_to_dev(hw), ent1);
	}

	return ICE_ERR_NO_MEMORY;
}
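
/**
 * ice_add_prof_to_lst - add profile entry to a list
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @lst: the list to be added to
 * @hdl: profile handle of entry to add
 */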
static enum ice_status
ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
		    struct list_head *lst, u64 hdl)
{
	enum ice_status status = 0;
	struct ice_prof_map *map;
	struct ice_vsig_prof *p;
	u16 i;

	mutex_lock(&hw->blk[blk].es.prof_map_lock);
	map = ice_search_prof_id(hw, blk, hdl);
	if (!map) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto err_ice_add_prof_to_lst;
	}

	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
	if (!p) {
		status = ICE_ERR_NO_MEMORY;
		goto err_ice_add_prof_to_lst;
	}

	p->profile_cookie = map->profile_cookie;
	p->prof_id = map->prof_id;
	p->tcam_count = map->ptg_cnt;

	for (i = 0; i < map->ptg_cnt; i++) {
		p->tcam[i].prof_id = map->prof_id;
		p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
		p->tcam[i].ptg = map->ptg[i];
	}

	list_add(&p->list, lst);

err_ice_add_prof_to_lst:
	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
	return status;
}
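
/**
 * ice_move_vsi - move VSI to another VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the VSI to move
 * @vsig: the VSIG to move the VSI to
 * @chg: the change list
 */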
static enum ice_status
ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
	     struct list_head *chg)
{
	enum ice_status status;
	struct ice_chs_chg *p;
	u16 orig_vsig;

	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
	if (!p)
		return ICE_ERR_NO_MEMORY;

	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
	if (!status)
		status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);

	if (status) {
		devm_kfree(ice_hw_to_dev(hw), p);
		return status;
	}

	p->type = ICE_VSI_MOVE;
	p->vsi = vsi;
	p->orig_vsig = orig_vsig;
	p->vsig = vsig;

	list_add(&p->list_entry, chg);

	return 0;
}
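
/**
 * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
 * @hw: pointer to the HW struct
 * @idx: the index of the TCAM entry to remove
 * @chg: the list of change structures to search
 */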
static void
ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct list_head *chg)
{
	struct ice_chs_chg *pos, *tmp;

	list_for_each_entry_safe(tmp, pos, chg, list_entry)
		if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
			list_del(&tmp->list_entry);
			devm_kfree(ice_hw_to_dev(hw), tmp);
		}
}
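
/**
 * ice_prof_tcam_ena_dis - add enable or disable TCAM change
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @enable: true to enable, false to disable
 * @vsig: the VSIG of the TCAM entry
 * @tcam: pointer to the TCAM info structure of the TCAM to disable
 * @chg: the change list
 *
 * This function appends an enable or disable TCAM entry in the change log.
 */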
static enum ice_status
ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
		      u16 vsig, struct ice_tcam_inf *tcam,
		      struct list_head *chg)
{
	enum ice_status status;
	struct ice_chs_chg *p;

	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* if disabling, free the TCAM */
	if (!enable) {
		status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);

		/* if we have already created a change for this TCAM entry,
		 * then we need to remove that entry, in order to prevent
		 * writing to a TCAM entry we no longer will have ownership
		 * of.
		 */
		ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
		tcam->tcam_idx = 0;
		tcam->in_use = 0;
		return status;
	}

	/* for re-enabling, reallocate a TCAM */
	status = ice_alloc_tcam_ent(hw, blk, &tcam->tcam_idx);
	if (status)
		return status;

	/* add TCAM to change list */
	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
	if (!p)
		return ICE_ERR_NO_MEMORY;

	status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
				      tcam->ptg, vsig, 0, 0, vl_msk, dc_msk,
				      nm_msk);
	if (status)
		goto err_ice_prof_tcam_ena_dis;

	tcam->in_use = 1;

	p->type = ICE_TCAM_ADD;
	p->add_tcam_idx = true;
	p->prof_id = tcam->prof_id;
	p->ptg = tcam->ptg;
	p->vsig = 0;
	p->tcam_idx = tcam->tcam_idx;

	/* log change */
	list_add(&p->list_entry, chg);

	return 0;

err_ice_prof_tcam_ena_dis:
	devm_kfree(ice_hw_to_dev(hw), p);
	return status;
}
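
/**
 * ice_adj_prof_priorities - adjust profile based on priorities
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: the VSIG for which to adjust profile priorities
 * @chg: the change list
 */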
static enum ice_status
ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
			struct list_head *chg)
{
	DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
	struct ice_vsig_prof *t;
	enum ice_status status;
	u16 idx;

	bitmap_zero(ptgs_used, ICE_XLT1_CNT);
	idx = vsig & ICE_VSIG_IDX_M;

	/* Priority is based on the order in which the profiles are added. The
	 * newest added profile has highest priority and the oldest added
	 * profile has the lowest priority. Since the profile property list for
	 * a VSIG is sorted from newest to oldest, this code traverses the list
	 * in order and enables the first of each PTG that it finds (that is
	 * not already enabled); it also disables any duplicate PTGs that it
	 * finds in the older profiles (that are currently enabled).
	 */

	list_for_each_entry(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
			    list) {
		u16 i;

		for (i = 0; i < t->tcam_count; i++) {
			/* Scan the priorities from newest to oldest.
			 * Make sure that the newest profiles take priority.
			 */
			if (test_bit(t->tcam[i].ptg, ptgs_used) &&
			    t->tcam[i].in_use) {
				/* need to mark this PTG as never match, as it
				 * was already in use and therefore duplicate
				 * (and lower priority)
				 */
				status = ice_prof_tcam_ena_dis(hw, blk, false,
							       vsig,
							       &t->tcam[i],
							       chg);
				if (status)
					return status;
			} else if (!test_bit(t->tcam[i].ptg, ptgs_used) &&
				   !t->tcam[i].in_use) {
				/* need to enable this PTG, as it is not in use
				 * and not enabled (highest priority)
				 */
				status = ice_prof_tcam_ena_dis(hw, blk, true,
							       vsig,
							       &t->tcam[i],
							       chg);
				if (status)
					return status;
			}

			/* keep track of used ptgs */
			set_bit(t->tcam[i].ptg, ptgs_used);
		}
	}

	return 0;
}
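
/**
 * ice_add_prof_id_vsig - add profile to VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: the VSIG to which this profile is to be added
 * @hdl: the profile handle indicating the profile to add
 * @rev: true to add entries to the end of the list
 * @chg: the change list
 */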
static enum ice_status
ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
		     bool rev, struct list_head *chg)
{
	/* Masks that ignore flags */
	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
	enum ice_status status = 0;
	struct ice_prof_map *map;
	struct ice_vsig_prof *t;
	struct ice_chs_chg *p;
	u16 vsig_idx, i;

	/* Error, if this VSIG already has this profile */
	if (ice_has_prof_vsig(hw, blk, vsig, hdl))
		return ICE_ERR_ALREADY_EXISTS;

	/* new VSIG profile structure */
	t = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*t), GFP_KERNEL);
	if (!t)
		return ICE_ERR_NO_MEMORY;

	mutex_lock(&hw->blk[blk].es.prof_map_lock);
	/* Get the details on the profile specified by the handle ID */
	map = ice_search_prof_id(hw, blk, hdl);
	if (!map) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto err_ice_add_prof_id_vsig;
	}

	t->profile_cookie = map->profile_cookie;
	t->prof_id = map->prof_id;
	t->tcam_count = map->ptg_cnt;

	/* create TCAM entries */
	for (i = 0; i < map->ptg_cnt; i++) {
		u16 tcam_idx;

		/* add TCAM to change list */
		p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
		if (!p) {
			status = ICE_ERR_NO_MEMORY;
			goto err_ice_add_prof_id_vsig;
		}

		/* allocate the TCAM entry index */
		status = ice_alloc_tcam_ent(hw, blk, &tcam_idx);
		if (status) {
			devm_kfree(ice_hw_to_dev(hw), p);
			goto err_ice_add_prof_id_vsig;
		}

		t->tcam[i].ptg = map->ptg[i];
		t->tcam[i].prof_id = map->prof_id;
		t->tcam[i].tcam_idx = tcam_idx;
		t->tcam[i].in_use = true;

		p->type = ICE_TCAM_ADD;
		p->add_tcam_idx = true;
		p->prof_id = t->tcam[i].prof_id;
		p->ptg = t->tcam[i].ptg;
		p->vsig = vsig;
		p->tcam_idx = t->tcam[i].tcam_idx;

		/* write the TCAM entry */
		status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
					      t->tcam[i].prof_id,
					      t->tcam[i].ptg, vsig, 0, 0,
					      vl_msk, dc_msk, nm_msk);
		if (status) {
			devm_kfree(ice_hw_to_dev(hw), p);
			goto err_ice_add_prof_id_vsig;
		}

		/* log change */
		list_add(&p->list_entry, chg);
	}

	/* add profile to VSIG */
	vsig_idx = vsig & ICE_VSIG_IDX_M;
	if (rev)
		list_add_tail(&t->list,
			      &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
	else
		list_add(&t->list,
			 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);

	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
	return status;

err_ice_add_prof_id_vsig:
	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
	/* let caller clean up the change list */
	devm_kfree(ice_hw_to_dev(hw), t);
	return status;
}
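
/**
 * ice_create_prof_id_vsig - add a new VSIG with a single profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the initial VSI that will be in VSIG
 * @hdl: the profile handle of the profile that will be added to the VSIG
 * @chg: the change list
 */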
static enum ice_status
ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
			struct list_head *chg)
{
	enum ice_status status;
	struct ice_chs_chg *p;
	u16 new_vsig;

	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
	if (!p)
		return ICE_ERR_NO_MEMORY;

	new_vsig = ice_vsig_alloc(hw, blk);
	if (!new_vsig) {
		status = ICE_ERR_HW_TABLE;
		goto err_ice_create_prof_id_vsig;
	}

	status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
	if (status)
		goto err_ice_create_prof_id_vsig;

	status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
	if (status)
		goto err_ice_create_prof_id_vsig;

	p->type = ICE_VSIG_ADD;
	p->vsi = vsi;
	p->orig_vsig = ICE_DEFAULT_VSIG;
	p->vsig = new_vsig;

	list_add(&p->list_entry, chg);

	return 0;

err_ice_create_prof_id_vsig:
	/* let caller clean up the change list */
	devm_kfree(ice_hw_to_dev(hw), p);
	return status;
}
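
/**
 * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the initial VSI that will be in VSIG
 * @lst: the list of profiles that will be added to the VSIG
 * @new_vsig: return of new VSIG
 * @chg: the change list
 */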
static enum ice_status
ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
			 struct list_head *lst, u16 *new_vsig,
			 struct list_head *chg)
{
	struct ice_vsig_prof *t;
	enum ice_status status;
	u16 vsig;

	vsig = ice_vsig_alloc(hw, blk);
	if (!vsig)
		return ICE_ERR_HW_TABLE;

	status = ice_move_vsi(hw, blk, vsi, vsig, chg);
	if (status)
		return status;

	list_for_each_entry(t, lst, list) {
		/* Reverse the order here since we are copying the list */
		status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
					      true, chg);
		if (status)
			return status;
	}

	*new_vsig = vsig;

	return 0;
}
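
/**
 * ice_find_prof_vsig - find a VSIG with a specific profile handle
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @hdl: the profile handle of the profile to search for
 * @vsig: returns the VSIG with the matching profile
 */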
static bool
ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
{
	struct ice_vsig_prof *t;
	enum ice_status status;
	struct list_head lst;

	INIT_LIST_HEAD(&lst);

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return false;

	t->profile_cookie = hdl;
	list_add(&t->list, &lst);

	status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);

	list_del(&t->list);
	kfree(t);

	return !status;
}
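
/**
 * ice_add_prof_id_flow - add profile flow
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the VSI to enable with the profile specified by ID
 * @hdl: profile handle
 *
 * Calling this function will update the hardware tables to enable the
 * profile indicated by the ID parameter for the VSIs specified in the VSI
 * array. Once successfully called, the flow will be enabled.
 */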
enum ice_status
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
	struct ice_vsig_prof *tmp1, *del1;
	struct ice_chs_chg *tmp, *del;
	struct list_head union_lst;
	enum ice_status status;
	struct list_head chg;
	u16 vsig;

	INIT_LIST_HEAD(&union_lst);
	INIT_LIST_HEAD(&chg);

	/* Get profile */
	status = ice_get_prof(hw, blk, hdl, &chg);
	if (status)
		return status;

	/* determine if VSI is already part of a VSIG */
	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
	if (!status && vsig) {
		bool only_vsi;
		u16 or_vsig;
		u16 ref;

		/* found in VSIG */
		or_vsig = vsig;

		/* make sure that there is no overlap/conflict between the new
		 * characteristics and the existing ones; we don't support that
		 * scenario
		 */
		if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
			status = ICE_ERR_ALREADY_EXISTS;
			goto err_ice_add_prof_id_flow;
		}

		/* last VSI in the VSIG? */
		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
		if (status)
			goto err_ice_add_prof_id_flow;
		only_vsi = (ref == 1);

		/* create a union of the current profiles and the one being
		 * added
		 */
		status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
		if (status)
			goto err_ice_add_prof_id_flow;

		status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
		if (status)
			goto err_ice_add_prof_id_flow;

		/* search for an existing VSIG with an exact charc match */
		status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
		if (!status) {
			/* move VSI to the VSIG that matches */
			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
			if (status)
				goto err_ice_add_prof_id_flow;

			/* VSI has been moved out of or_vsig. If the or_vsig
			 * had only that VSI it is now empty and can be
			 * removed.
			 */
			if (only_vsi) {
				status = ice_rem_vsig(hw, blk, or_vsig, &chg);
				if (status)
					goto err_ice_add_prof_id_flow;
			}
		} else if (only_vsi) {
			/* If the original VSIG only contains one VSI, then it
			 * will be the requesting VSI. In this case the VSI is
			 * not sharing entries and we can simply add the new
			 * profile to the VSIG.
			 */
			status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
						      &chg);
			if (status)
				goto err_ice_add_prof_id_flow;

			/* Adjust priorities */
			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
			if (status)
				goto err_ice_add_prof_id_flow;
		} else {
			/* No match, so we need a new VSIG */
			status = ice_create_vsig_from_lst(hw, blk, vsi,
							  &union_lst, &vsig,
							  &chg);
			if (status)
				goto err_ice_add_prof_id_flow;

			/* Adjust priorities */
			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
			if (status)
				goto err_ice_add_prof_id_flow;
		}
	} else {
		/* need to find or add a VSIG */
		/* search for an existing VSIG with an exact charc match */
		if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
			/* found an exact match */
			/* add or move VSI to the VSIG that matches */
			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
			if (status)
				goto err_ice_add_prof_id_flow;
		} else {
			/* we did not find an exact match */
			/* we need to add a VSIG */
			status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
							 &chg);
			if (status)
				goto err_ice_add_prof_id_flow;
		}
	}

	/* update hardware */
	if (!status)
		status = ice_upd_prof_hw(hw, blk, &chg);

err_ice_add_prof_id_flow:
	list_for_each_entry_safe(del, tmp, &chg, list_entry) {
		list_del(&del->list_entry);
		devm_kfree(ice_hw_to_dev(hw), del);
	}

	list_for_each_entry_safe(del1, tmp1, &union_lst, list) {
		list_del(&del1->list);
		devm_kfree(ice_hw_to_dev(hw), del1);
	}

	return status;
}
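
/**
 * ice_rem_prof_from_list - remove a profile from list
 * @hw: pointer to the HW struct
 * @lst: list to remove the profile from
 * @hdl: the profile handle indicating the profile to remove
 */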
static enum ice_status
ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl)
{
	struct ice_vsig_prof *ent, *tmp;

	list_for_each_entry_safe(ent, tmp, lst, list)
		if (ent->profile_cookie == hdl) {
			list_del(&ent->list);
			devm_kfree(ice_hw_to_dev(hw), ent);
			return 0;
		}

	return ICE_ERR_DOES_NOT_EXIST;
}
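
/**
 * ice_rem_prof_id_flow - remove flow
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the VSI from which to remove the profile specified by ID
 * @hdl: profile tracking handle
 *
 * Calling this function will update the hardware tables to remove the
 * profile indicated by the ID parameter for the VSIs specified in the VSI
 * array. Once successfully called, the flow will be disabled.
 */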
enum ice_status
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
	struct ice_vsig_prof *tmp1, *del1;
	struct ice_chs_chg *tmp, *del;
	struct list_head chg, copy;
	enum ice_status status;
	u16 vsig;

	INIT_LIST_HEAD(&copy);
	INIT_LIST_HEAD(&chg);

	/* determine if VSI is already part of a VSIG */
	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
	if (!status && vsig) {
		bool last_profile;
		bool only_vsi;
		u16 ref;

		/* found in VSIG */
		last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
		if (status)
			goto err_ice_rem_prof_id_flow;
		only_vsi = (ref == 1);

		if (only_vsi) {
			/* If the original VSIG only contains one reference,
			 * which will be the requesting VSI, then the VSI is
			 * not sharing entries and we can simply remove the
			 * specific characteristics from the VSIG.
			 */

			if (last_profile) {
				/* If there are no profiles left for this VSIG,
				 * then simply remove the VSIG.
				 */
				status = ice_rem_vsig(hw, blk, vsig, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			} else {
				status = ice_rem_prof_id_vsig(hw, blk, vsig,
							      hdl, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;

				/* Adjust priorities */
				status = ice_adj_prof_priorities(hw, blk, vsig,
								 &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			}

		} else {
			/* Make a copy of the VSIG's list of Profiles */
			status = ice_get_profs_vsig(hw, blk, vsig, &copy);
			if (status)
				goto err_ice_rem_prof_id_flow;

			/* Remove specified profile entry from the list */
			status = ice_rem_prof_from_list(hw, &copy, hdl);
			if (status)
				goto err_ice_rem_prof_id_flow;

			if (list_empty(&copy)) {
				status = ice_move_vsi(hw, blk, vsi,
						      ICE_DEFAULT_VSIG, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;

			} else if (!ice_find_dup_props_vsig(hw, blk, &copy,
							    &vsig)) {
				/* found an exact match */
				/* add or move VSI to the VSIG that matches */
				/* Search for a VSIG with a matching profile
				 * list
				 */

				/* Found match, move VSI to the matching VSIG */
				status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			} else {
				/* since no existing VSIG supports this
				 * characteristic pattern, we need to create a
				 * new VSIG and TCAM entries
				 */
				status = ice_create_vsig_from_lst(hw, blk, vsi,
								  &copy, &vsig,
								  &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;

				/* Adjust priorities */
				status = ice_adj_prof_priorities(hw, blk, vsig,
								 &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			}
		}
	} else {
		status = ICE_ERR_DOES_NOT_EXIST;
	}

	/* update hardware tables */
	if (!status)
		status = ice_upd_prof_hw(hw, blk, &chg);

err_ice_rem_prof_id_flow:
	list_for_each_entry_safe(del, tmp, &chg, list_entry) {
		list_del(&del->list_entry);
		devm_kfree(ice_hw_to_dev(hw), del);
	}

	list_for_each_entry_safe(del1, tmp1, &copy, list) {
		list_del(&del1->list);
		devm_kfree(ice_hw_to_dev(hw), del1);
	}

	return status;
}