1
2
3
4#include "ice_lib.h"
5#include "ice_switch.h"
6
/* Byte offsets into the dummy Ethernet header at which switch-rule lookup
 * fields are patched in: destination MAC, EtherType, and VLAN TCI.
 */
#define ICE_ETH_DA_OFFSET 0
#define ICE_ETH_ETHTYPE_OFFSET 12
#define ICE_ETH_VLAN_TCI_OFFSET 14
/* Largest valid 12-bit VLAN ID */
#define ICE_MAX_VLAN_ID 0xFFF
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
 * struct to configure any switch filter rules.
 * {DA (6 bytes), SA(6 bytes),
 * Ether type (2 bytes for header without VLAN tag) OR
 * VLAN tag (4 bytes for header with VLAN tag) }
 */
#define DUMMY_ETH_HDR_LEN 16
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
							0x2, 0, 0, 0, 0, 0,
							0x81, 0, 0, 0};

/* Size of a Rx/Tx lookup rule element including the full dummy header */
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
	 (DUMMY_ETH_HDR_LEN * \
	  sizeof(((struct ice_sw_rule_lkup_rx_tx *)0)->hdr[0])))
/* Size of a Rx/Tx lookup rule element with no packet header */
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
/* Size of a large-action rule element carrying n 32-bit actions */
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lg_act.act) + \
	 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act[0])))
/* Size of a VSI-list rule element carrying n 16-bit VSI numbers */
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \
	 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0])))
44
45
46
47
48
49
50
51
52enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
53{
54 struct ice_sw_recipe *recps;
55 u8 i;
56
57 recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
58 sizeof(*recps), GFP_KERNEL);
59 if (!recps)
60 return ICE_ERR_NO_MEMORY;
61
62 for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
63 recps[i].root_rid = i;
64 INIT_LIST_HEAD(&recps[i].filt_rules);
65 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
66 mutex_init(&recps[i].filt_rule_lock);
67 }
68
69 hw->switch_info->recp_list = recps;
70
71 return 0;
72}
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99static enum ice_status
100ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
101 u16 buf_size, u16 *req_desc, u16 *num_elems,
102 struct ice_sq_cd *cd)
103{
104 struct ice_aqc_get_sw_cfg *cmd;
105 struct ice_aq_desc desc;
106 enum ice_status status;
107
108 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
109 cmd = &desc.params.get_sw_conf;
110 cmd->element = cpu_to_le16(*req_desc);
111
112 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
113 if (!status) {
114 *req_desc = le16_to_cpu(cmd->element);
115 *num_elems = le16_to_cpu(cmd->num_elems);
116 }
117
118 return status;
119}
120
121
122
123
124
125
126
127
128
129static enum ice_status
130ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
131 struct ice_sq_cd *cd)
132{
133 struct ice_aqc_add_update_free_vsi_resp *res;
134 struct ice_aqc_add_get_update_free_vsi *cmd;
135 struct ice_aq_desc desc;
136 enum ice_status status;
137
138 cmd = &desc.params.vsi_cmd;
139 res = &desc.params.add_update_free_vsi_res;
140
141 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
142
143 if (!vsi_ctx->alloc_from_pool)
144 cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
145 ICE_AQ_VSI_IS_VALID);
146 cmd->vf_id = vsi_ctx->vf_num;
147
148 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
149
150 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
151
152 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
153 sizeof(vsi_ctx->info), cd);
154
155 if (!status) {
156 vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
157 vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
158 vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
159 }
160
161 return status;
162}
163
164
165
166
167
168
169
170
171
172
173static enum ice_status
174ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
175 bool keep_vsi_alloc, struct ice_sq_cd *cd)
176{
177 struct ice_aqc_add_update_free_vsi_resp *resp;
178 struct ice_aqc_add_get_update_free_vsi *cmd;
179 struct ice_aq_desc desc;
180 enum ice_status status;
181
182 cmd = &desc.params.vsi_cmd;
183 resp = &desc.params.add_update_free_vsi_res;
184
185 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
186
187 cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
188 if (keep_vsi_alloc)
189 cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);
190
191 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
192 if (!status) {
193 vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
194 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
195 }
196
197 return status;
198}
199
200
201
202
203
204
205
206
207
208static enum ice_status
209ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
210 struct ice_sq_cd *cd)
211{
212 struct ice_aqc_add_update_free_vsi_resp *resp;
213 struct ice_aqc_add_get_update_free_vsi *cmd;
214 struct ice_aq_desc desc;
215 enum ice_status status;
216
217 cmd = &desc.params.vsi_cmd;
218 resp = &desc.params.add_update_free_vsi_res;
219
220 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
221
222 cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
223
224 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
225
226 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
227 sizeof(vsi_ctx->info), cd);
228
229 if (!status) {
230 vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
231 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
232 }
233
234 return status;
235}
236
237
238
239
240
241
242
243
244bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
245{
246 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
247}
248
249
250
251
252
253
254
255
256
257u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
258{
259 return hw->vsi_ctx[vsi_handle]->vsi_num;
260}
261
262
263
264
265
266
267
268
269struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
270{
271 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
272}
273
274
275
276
277
278
279
280
281
/**
 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @vsi: VSI context pointer
 *
 * save the VSI context entry for a given VSI handle
 */
static void
ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
{
	hw->vsi_ctx[vsi_handle] = vsi;
}
287
288
289
290
291
292
293static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
294{
295 struct ice_vsi_ctx *vsi;
296 u8 i;
297
298 vsi = ice_get_vsi_ctx(hw, vsi_handle);
299 if (!vsi)
300 return;
301 ice_for_each_traffic_class(i) {
302 if (vsi->lan_q_ctx[i]) {
303 devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
304 vsi->lan_q_ctx[i] = NULL;
305 }
306 if (vsi->rdma_q_ctx[i]) {
307 devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
308 vsi->rdma_q_ctx[i] = NULL;
309 }
310 }
311}
312
313
314
315
316
317
318
319
320static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
321{
322 struct ice_vsi_ctx *vsi;
323
324 vsi = ice_get_vsi_ctx(hw, vsi_handle);
325 if (vsi) {
326 ice_clear_vsi_q_ctx(hw, vsi_handle);
327 devm_kfree(ice_hw_to_dev(hw), vsi);
328 hw->vsi_ctx[vsi_handle] = NULL;
329 }
330}
331
332
333
334
335
336void ice_clear_all_vsi_ctx(struct ice_hw *hw)
337{
338 u16 i;
339
340 for (i = 0; i < ICE_MAX_VSI; i++)
341 ice_clear_vsi_ctx(hw, i);
342}
343
344
345
346
347
348
349
350
351
352
353
354
355enum ice_status
356ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
357 struct ice_sq_cd *cd)
358{
359 struct ice_vsi_ctx *tmp_vsi_ctx;
360 enum ice_status status;
361
362 if (vsi_handle >= ICE_MAX_VSI)
363 return ICE_ERR_PARAM;
364 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
365 if (status)
366 return status;
367 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
368 if (!tmp_vsi_ctx) {
369
370 tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
371 sizeof(*tmp_vsi_ctx), GFP_KERNEL);
372 if (!tmp_vsi_ctx) {
373 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
374 return ICE_ERR_NO_MEMORY;
375 }
376 *tmp_vsi_ctx = *vsi_ctx;
377 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
378 } else {
379
380 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
381 }
382
383 return 0;
384}
385
386
387
388
389
390
391
392
393
394
395
396enum ice_status
397ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
398 bool keep_vsi_alloc, struct ice_sq_cd *cd)
399{
400 enum ice_status status;
401
402 if (!ice_is_vsi_valid(hw, vsi_handle))
403 return ICE_ERR_PARAM;
404 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
405 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
406 if (!status)
407 ice_clear_vsi_ctx(hw, vsi_handle);
408 return status;
409}
410
411
412
413
414
415
416
417
418
419
420enum ice_status
421ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
422 struct ice_sq_cd *cd)
423{
424 if (!ice_is_vsi_valid(hw, vsi_handle))
425 return ICE_ERR_PARAM;
426 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
427 return ice_aq_update_vsi(hw, vsi_ctx, cd);
428}
429
430
431
432
433
434
435
436int
437ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
438{
439 struct ice_vsi_ctx *ctx;
440
441 ctx = ice_get_vsi_ctx(hw, vsi_handle);
442 if (!ctx)
443 return -EIO;
444
445 if (enable)
446 ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
447 else
448 ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
449
450 return ice_status_to_errno(ice_update_vsi(hw, vsi_handle, ctx, NULL));
451}
452
453
454
455
456
457
458
459
460
461
462static enum ice_status
463ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
464 enum ice_sw_lkup_type lkup_type,
465 enum ice_adminq_opc opc)
466{
467 struct ice_aqc_alloc_free_res_elem *sw_buf;
468 struct ice_aqc_res_elem *vsi_ele;
469 enum ice_status status;
470 u16 buf_len;
471
472 buf_len = struct_size(sw_buf, elem, 1);
473 sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
474 if (!sw_buf)
475 return ICE_ERR_NO_MEMORY;
476 sw_buf->num_elems = cpu_to_le16(1);
477
478 if (lkup_type == ICE_SW_LKUP_MAC ||
479 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
480 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
481 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
482 lkup_type == ICE_SW_LKUP_PROMISC ||
483 lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
484 sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
485 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
486 sw_buf->res_type =
487 cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
488 } else {
489 status = ICE_ERR_PARAM;
490 goto ice_aq_alloc_free_vsi_list_exit;
491 }
492
493 if (opc == ice_aqc_opc_free_res)
494 sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
495
496 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
497 if (status)
498 goto ice_aq_alloc_free_vsi_list_exit;
499
500 if (opc == ice_aqc_opc_alloc_res) {
501 vsi_ele = &sw_buf->elem[0];
502 *vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
503 }
504
505ice_aq_alloc_free_vsi_list_exit:
506 devm_kfree(ice_hw_to_dev(hw), sw_buf);
507 return status;
508}
509
510
511
512
513
514
515
516
517
518
519
520
521static enum ice_status
522ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
523 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
524{
525 struct ice_aq_desc desc;
526 enum ice_status status;
527
528 if (opc != ice_aqc_opc_add_sw_rules &&
529 opc != ice_aqc_opc_update_sw_rules &&
530 opc != ice_aqc_opc_remove_sw_rules)
531 return ICE_ERR_PARAM;
532
533 ice_fill_dflt_direct_cmd_desc(&desc, opc);
534
535 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
536 desc.params.sw_rules.num_rules_fltr_entry_index =
537 cpu_to_le16(num_rules);
538 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
539 if (opc != ice_aqc_opc_add_sw_rules &&
540 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
541 status = ICE_ERR_DOES_NOT_EXIST;
542
543 return status;
544}
545
546
547
548
549
550
551
552
553
554static void
555ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
556 u16 swid, u16 pf_vf_num, bool is_vf)
557{
558 switch (type) {
559 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
560 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
561 pi->sw_id = swid;
562 pi->pf_vf_num = pf_vf_num;
563 pi->is_vf = is_vf;
564 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
565 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
566 break;
567 default:
568 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
569 break;
570 }
571}
572
573
574
575
576enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
577{
578 struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
579 enum ice_status status;
580 u16 req_desc = 0;
581 u16 num_elems;
582 u16 i;
583
584 rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
585 GFP_KERNEL);
586
587 if (!rbuf)
588 return ICE_ERR_NO_MEMORY;
589
590
591
592
593
594
595 do {
596 struct ice_aqc_get_sw_cfg_resp_elem *ele;
597
598 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
599 &req_desc, &num_elems, NULL);
600
601 if (status)
602 break;
603
604 for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
605 u16 pf_vf_num, swid, vsi_port_num;
606 bool is_vf = false;
607 u8 res_type;
608
609 vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
610 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
611
612 pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
613 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
614
615 swid = le16_to_cpu(ele->swid);
616
617 if (le16_to_cpu(ele->pf_vf_num) &
618 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
619 is_vf = true;
620
621 res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >>
622 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
623
624 if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
625
626 continue;
627 }
628
629 ice_init_port_info(hw->port_info, vsi_port_num,
630 res_type, swid, pf_vf_num, is_vf);
631 }
632 } while (req_desc && !status);
633
634 devm_kfree(ice_hw_to_dev(hw), rbuf);
635 return status;
636}
637
638
639
640
641
642
643
644
645
646
/**
 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
 * @hw: pointer to the hardware structure
 * @fi: filter info to fill/update
 *
 * This helper function populates the lb_en and lan_en elements of the provided
 * ice_fltr_info struct using the switch's type and characteristics of the
 * switch rule being configured.
 */
static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
{
	fi->lb_en = false;
	fi->lan_en = false;
	if ((fi->flag & ICE_FLTR_TX) &&
	    (fi->fltr_act == ICE_FWD_TO_VSI ||
	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
	     fi->fltr_act == ICE_FWD_TO_Q ||
	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
		/* Setting LB for prune actions will result in replicated
		 * packets to the internal switch that will be dropped.
		 */
		if (fi->lkup_type != ICE_SW_LKUP_VLAN)
			fi->lb_en = true;

		/* Set lan_en to TRUE if
		 * 1. The switch is a VEB AND
		 * 2
		 * 2.1 The lookup is a directional lookup like ethertype,
		 * promiscuous, ethertype-MAC, promiscuous-VLAN
		 * and default-port OR
		 * 2.2 The lookup is VLAN, OR
		 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
		 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
		 *
		 * OR
		 *
		 * The switch is a VEPA.
		 *
		 * In all other cases, the LAN enable has to be set to false.
		 */
		if (hw->evb_veb) {
			if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
			    fi->lkup_type == ICE_SW_LKUP_PROMISC ||
			    fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
			    fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
			    fi->lkup_type == ICE_SW_LKUP_DFLT ||
			    fi->lkup_type == ICE_SW_LKUP_VLAN ||
			    (fi->lkup_type == ICE_SW_LKUP_MAC &&
			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
			    (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
				fi->lan_en = true;
		} else {
			fi->lan_en = true;
		}
	}
}
695
696
697
698
699
700
701
702
/**
 * ice_fill_sw_rule - Helper function to fill switch rule structure
 * @hw: pointer to the hardware structure
 * @f_info: entry containing packet forwarding information
 * @s_rule: switch rule structure to be filled in based on mac_entry
 * @opc: switch rules population command type - pass in the command opcode
 */
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
{
	u16 vlan_id = ICE_MAX_VLAN_ID + 1; /* sentinel: no VLAN to program */
	void *daddr = NULL;
	u16 eth_hdr_sz;
	u8 *eth_hdr;
	u32 act = 0;
	__be16 *off;
	u8 q_rgn;

	if (opc == ice_aqc_opc_remove_sw_rules) {
		/* Removal only needs the rule index; clear everything else */
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			cpu_to_le16(f_info->fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		return;
	}

	eth_hdr_sz = sizeof(dummy_eth_header);
	eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;

	/* initialize the ether header with a dummy header */
	memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
	ice_fill_sw_info(hw, f_info);

	/* Build the 32-bit action word based on the forwarding action */
	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
			ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_QGRP:
		/* Queue-group size is encoded as log2 of the group size */
		q_rgn = f_info->qgrp_size > 0 ?
			(u8)ilog2(f_info->qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
		break;
	default:
		return;
	}

	if (f_info->lb_en)
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	if (f_info->lan_en)
		act |= ICE_SINGLE_ACT_LAN_ENABLE;

	/* Pick the lookup fields (dest MAC / VLAN / ethertype) to patch
	 * into the dummy header based on the lookup type.
	 */
	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
		break;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
		}
		break;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
		fallthrough;
	case ICE_SW_LKUP_ETHERTYPE:
		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		break;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		fallthrough;
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		break;
	default:
		break;
	}

	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);

	/* Recipe set depending on lookup type */
	s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type);
	s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src);
	s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);

	if (daddr)
		ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);

	/* Only program the VLAN TCI if a valid (<= 0xFFF) VLAN ID was set */
	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
		*off = cpu_to_be16(vlan_id);
	}

	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz);
}
828
829
830
831
832
833
834
835
836
837
838
/**
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource ID
 *
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
 */
static enum ice_status
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile ID
	 * 3. GENERIC VALUE action to hold the software marker ID
	 */
	const u16 num_lg_acts = 3;
	enum ice_status status;
	u16 lg_act_size;
	u16 rules_size;
	u32 act;
	u16 id;

	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	/* Create two back-to-back switch rules and submit them to the HW
	 * using one memory buffer:
	 *    1. Large Action
	 *    2. Look up Tx Rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
	if (!lg_act)
		return ICE_ERR_NO_MEMORY;

	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = cpu_to_le16(l_id);
	lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts);

	/* First action: VSI forwarding or VSI list forwarding depending on
	 * how many VSIs are subscribed to this filter
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);

	/* Second action: generic value (profile ID of 1) */
	act = ICE_LG_ACT_GENERIC;

	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);

	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;

	/* Third action: the software marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;

	lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Update the action to point to the large action ID */
	rx_tx->pdata.lkup_tx_rx.act =
		cpu_to_le32(ICE_SINGLE_ACT_PTR |
			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
			     ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	rx_tx->pdata.lkup_tx_rx.index =
		cpu_to_le16(m_ent->fltr_info.fltr_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		/* Record the large action binding in the management entry */
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	}

	devm_kfree(ice_hw_to_dev(hw), lg_act);
	return status;
}
932
933
934
935
936
937
938
939
940
941
942
943static struct ice_vsi_list_map_info *
944ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
945 u16 vsi_list_id)
946{
947 struct ice_switch_info *sw = hw->switch_info;
948 struct ice_vsi_list_map_info *v_map;
949 int i;
950
951 v_map = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*v_map), GFP_KERNEL);
952 if (!v_map)
953 return NULL;
954
955 v_map->vsi_list_id = vsi_list_id;
956 v_map->ref_cnt = 1;
957 for (i = 0; i < num_vsi; i++)
958 set_bit(vsi_handle_arr[i], v_map->vsi_map);
959
960 list_add(&v_map->list_entry, &sw->vsi_list_map_head);
961 return v_map;
962}
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977static enum ice_status
978ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
979 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
980 enum ice_sw_lkup_type lkup_type)
981{
982 struct ice_aqc_sw_rules_elem *s_rule;
983 enum ice_status status;
984 u16 s_rule_size;
985 u16 rule_type;
986 int i;
987
988 if (!num_vsi)
989 return ICE_ERR_PARAM;
990
991 if (lkup_type == ICE_SW_LKUP_MAC ||
992 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
993 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
994 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
995 lkup_type == ICE_SW_LKUP_PROMISC ||
996 lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
997 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
998 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
999 else if (lkup_type == ICE_SW_LKUP_VLAN)
1000 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
1001 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
1002 else
1003 return ICE_ERR_PARAM;
1004
1005 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
1006 s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
1007 if (!s_rule)
1008 return ICE_ERR_NO_MEMORY;
1009 for (i = 0; i < num_vsi; i++) {
1010 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
1011 status = ICE_ERR_PARAM;
1012 goto exit;
1013 }
1014
1015 s_rule->pdata.vsi_list.vsi[i] =
1016 cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
1017 }
1018
1019 s_rule->type = cpu_to_le16(rule_type);
1020 s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
1021 s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
1022
1023 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
1024
1025exit:
1026 devm_kfree(ice_hw_to_dev(hw), s_rule);
1027 return status;
1028}
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038static enum ice_status
1039ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
1040 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
1041{
1042 enum ice_status status;
1043
1044 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
1045 ice_aqc_opc_alloc_res);
1046 if (status)
1047 return status;
1048
1049
1050 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
1051 *vsi_list_id, false,
1052 ice_aqc_opc_add_sw_rules, lkup_type);
1053}
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
/**
 * ice_create_pkt_fwd_rule
 * @hw: pointer to the hardware structure
 * @f_entry: entry containing packet forwarding information
 *
 * Create switch rule with given filter information and add an entry
 * to the corresponding filter management list to track this switch rule
 * and VSI mapping
 */
static enum ice_status
ice_create_pkt_fwd_rule(struct ice_hw *hw,
			struct ice_fltr_list_entry *f_entry)
{
	struct ice_fltr_mgmt_list_entry *fm_entry;
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_sw_lkup_type l_type;
	struct ice_sw_recipe *recp;
	enum ice_status status;

	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;
	fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
				GFP_KERNEL);
	if (!fm_entry) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_create_pkt_fwd_rule_exit;
	}

	fm_entry->fltr_info = f_entry->fltr_info;

	/* Initialize all the fields for the management entry */
	fm_entry->vsi_count = 1;
	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;

	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
			 ice_aqc_opc_add_sw_rules);

	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
				 ice_aqc_opc_add_sw_rules, NULL);
	if (status) {
		/* HW add failed; the management entry is never published,
		 * so free it here (s_rule is freed at the common exit).
		 */
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
		goto ice_create_pkt_fwd_rule_exit;
	}

	/* Record the firmware-assigned rule ID in both the caller's entry
	 * and the book keeping entry.
	 */
	f_entry->fltr_info.fltr_rule_id =
		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
	fm_entry->fltr_info.fltr_rule_id =
		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);

	/* The book keeping entries will get removed when base driver
	 * calls remove filter AQ command
	 */
	l_type = fm_entry->fltr_info.lkup_type;
	recp = &hw->switch_info->recp_list[l_type];
	list_add(&fm_entry->list_entry, &recp->filt_rules);

ice_create_pkt_fwd_rule_exit:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128static enum ice_status
1129ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
1130{
1131 struct ice_aqc_sw_rules_elem *s_rule;
1132 enum ice_status status;
1133
1134 s_rule = devm_kzalloc(ice_hw_to_dev(hw),
1135 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
1136 if (!s_rule)
1137 return ICE_ERR_NO_MEMORY;
1138
1139 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
1140
1141 s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id);
1142
1143
1144 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
1145 ice_aqc_opc_update_sw_rules, NULL);
1146
1147 devm_kfree(ice_hw_to_dev(hw), s_rule);
1148 return status;
1149}
1150
1151
1152
1153
1154
1155
1156
1157enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
1158{
1159 struct ice_switch_info *sw = hw->switch_info;
1160 struct ice_fltr_mgmt_list_entry *fm_entry;
1161 enum ice_status status = 0;
1162 struct list_head *rule_head;
1163 struct mutex *rule_lock;
1164
1165 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
1166 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
1167
1168 mutex_lock(rule_lock);
1169 list_for_each_entry(fm_entry, rule_head, list_entry) {
1170 struct ice_fltr_info *fi = &fm_entry->fltr_info;
1171 u8 *addr = fi->l_data.mac.mac_addr;
1172
1173
1174
1175
1176 if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
1177 (fi->fltr_act == ICE_FWD_TO_VSI ||
1178 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1179 fi->fltr_act == ICE_FWD_TO_Q ||
1180 fi->fltr_act == ICE_FWD_TO_QGRP)) {
1181 status = ice_update_pkt_fwd_rule(hw, fi);
1182 if (status)
1183 break;
1184 }
1185 }
1186
1187 mutex_unlock(rule_lock);
1188
1189 return status;
1190}
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
/**
 * ice_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the book keeping is described below :
 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
 *		Update the previously created switch rule with the
 *		newly created VSI list ID
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set
 *		using the update switch rule command
 */
static enum ice_status
ice_add_update_vsi_list(struct ice_hw *hw,
			struct ice_fltr_mgmt_list_entry *m_entry,
			struct ice_fltr_info *cur_fltr,
			struct ice_fltr_info *new_fltr)
{
	enum ice_status status = 0;
	u16 vsi_list_id = 0;

	/* Queue/queue-group destinations cannot be combined into lists */
	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
		return ICE_ERR_NOT_IMPL;

	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
		return ICE_ERR_NOT_IMPL;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
			return ICE_ERR_ALREADY_EXISTS;

		vsi_handle_arr[0] = cur_fltr->vsi_handle;
		vsi_handle_arr[1] = new_fltr->vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  new_fltr->lkup_type);
		if (status)
			return status;

		tmp_fltr = *new_fltr;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);

		if (!m_entry->vsi_list_info)
			return ICE_ERR_NO_MEMORY;

		/* If this entry was large action then the large action needs
		 * to be updated to point to FWD to VSI list
		 */
		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
			status =
			    ice_add_marker_act(hw, m_entry,
					       m_entry->sw_marker_id,
					       m_entry->lg_act_idx);
	} else {
		u16 vsi_handle = new_fltr->vsi_handle;
		enum ice_adminq_opc opcode;

		if (!m_entry->vsi_list_info)
			return ICE_ERR_CFG;

		/* A rule already exists with the new VSI being added */
		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
			return 0;

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
		opcode = ice_aqc_opc_update_sw_rules;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false, opcode,
						  new_fltr->lkup_type);
		/* update VSI list mapping info with new VSI ID */
		if (!status)
			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
	}
	if (!status)
		m_entry->vsi_count++;
	return status;
}
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318static struct ice_fltr_mgmt_list_entry *
1319ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
1320{
1321 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
1322 struct ice_switch_info *sw = hw->switch_info;
1323 struct list_head *list_head;
1324
1325 list_head = &sw->recp_list[recp_id].filt_rules;
1326 list_for_each_entry(list_itr, list_head, list_entry) {
1327 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
1328 sizeof(f_info->l_data)) &&
1329 f_info->flag == list_itr->fltr_info.flag) {
1330 ret = list_itr;
1331 break;
1332 }
1333 }
1334 return ret;
1335}
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348static struct ice_vsi_list_map_info *
1349ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
1350 u16 *vsi_list_id)
1351{
1352 struct ice_vsi_list_map_info *map_info = NULL;
1353 struct ice_switch_info *sw = hw->switch_info;
1354 struct ice_fltr_mgmt_list_entry *list_itr;
1355 struct list_head *list_head;
1356
1357 list_head = &sw->recp_list[recp_id].filt_rules;
1358 list_for_each_entry(list_itr, list_head, list_entry) {
1359 if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
1360 map_info = list_itr->vsi_list_info;
1361 if (test_bit(vsi_handle, map_info->vsi_map)) {
1362 *vsi_list_id = map_info->vsi_list_id;
1363 return map_info;
1364 }
1365 }
1366 }
1367 return NULL;
1368}
1369
1370
1371
1372
1373
1374
1375
1376
1377
/**
 * ice_add_rule_internal - add rule for a given lookup type
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type (recipe ID) for which rule has to be added
 * @f_entry: structure containing MAC forwarding information
 *
 * Adds or updates the rule lists for a given recipe
 */
static enum ice_status
ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
		      struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;

	mutex_lock(rule_lock);
	new_fltr = &f_entry->fltr_info;
	/* Source is the port for Rx rules, the HW VSI number for Tx rules */
	if (new_fltr->flag & ICE_FLTR_RX)
		new_fltr->src = hw->port_info->lport;
	else if (new_fltr->flag & ICE_FLTR_TX)
		new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;

	m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
	if (!m_entry) {
		/* No existing rule for this lookup data; release the lock
		 * before creating a fresh forwarding rule.
		 */
		mutex_unlock(rule_lock);
		return ice_create_pkt_fwd_rule(hw, f_entry);
	}

	/* A rule already exists; fold the new VSI into its VSI list */
	cur_fltr = &m_entry->fltr_info;
	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
	mutex_unlock(rule_lock);

	return status;
}
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424static enum ice_status
1425ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
1426 enum ice_sw_lkup_type lkup_type)
1427{
1428 struct ice_aqc_sw_rules_elem *s_rule;
1429 enum ice_status status;
1430 u16 s_rule_size;
1431
1432 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
1433 s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
1434 if (!s_rule)
1435 return ICE_ERR_NO_MEMORY;
1436
1437 s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
1438 s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
1439
1440
1441
1442
1443 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
1444 ice_aqc_opc_free_res);
1445
1446 devm_kfree(ice_hw_to_dev(hw), s_rule);
1447 return status;
1448}
1449
1450
1451
1452
1453
1454
1455
1456
/**
 * ice_rem_update_vsi_list - remove one VSI from a rule's VSI list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry whose VSI list is shrunk
 *
 * Detaches @vsi_handle from the VSI list behind @fm_list. When only one
 * VSI remains (for non-VLAN lookups) the rule is converted back to a
 * direct FWD_TO_VSI rule and the now-unneeded VSI list is freed. VLAN
 * lookups keep their list until it is completely empty.
 */
static enum ice_status
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			struct ice_fltr_mgmt_list_entry *fm_list)
{
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status = 0;
	u16 vsi_list_id;

	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;

	/* A rule with the VSI being removed does not exist */
	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
		return ICE_ERR_DOES_NOT_EXIST;

	lkup_type = fm_list->fltr_info.lkup_type;
	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
	/* Tell FW to drop this VSI from the list (remove_vsi = true) */
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);

	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;
		u16 rem_vsi_handle;

		/* find the one remaining member of the list */
		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
						ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return ICE_ERR_OUT_OF_RANGE;

		/* Make sure the VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		/* Convert the rule to forward directly to the last VSI */
		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		tmp_fltr_info.vsi_handle = rem_vsi_handle;
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
			return status;
		}

		fm_list->fltr_info = tmp_fltr_info;
	}

	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}
1537
1538
1539
1540
1541
1542
1543
/**
 * ice_remove_rule_internal - Remove a filter rule of a given type
 * @hw: pointer to the hardware structure
 * @recp_id: recipe ID for which the rule needs to be removed
 * @f_entry: rule entry containing filter information
 */
static enum ice_status
ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
			 struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_elem;
	struct mutex *rule_lock; /* protects this recipe's filt_rules list */
	enum ice_status status = 0;
	bool remove_rule = false;
	u16 vsi_handle;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
	mutex_lock(rule_lock);
	list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
	if (!list_elem) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit;
	}

	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
		/* Plain FWD_TO_VSI rule: remove it outright */
		remove_rule = true;
	} else if (!list_elem->vsi_list_info) {
		/* A FWD_TO_VSI_LIST rule without a VSI list map is
		 * inconsistent bookkeeping; treat as not found.
		 */
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit;
	} else if (list_elem->vsi_list_info->ref_cnt > 1) {
		/* The VSI list map is shared by other rules; drop only
		 * our reference and remove this rule, leaving the list
		 * intact for its remaining users.
		 */
		list_elem->vsi_list_info->ref_cnt--;
		remove_rule = true;
	} else {
		/* Sole user of the VSI list: detach this VSI from it.
		 * The rule itself is removed only once no VSIs remain
		 * subscribed (vsi_count reaches zero).
		 */
		vsi_handle = f_entry->fltr_info.vsi_handle;
		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status)
			goto exit;

		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}

	if (remove_rule) {
		/* Remove the lookup rule from hardware via the AQ */
		struct ice_aqc_sw_rules_elem *s_rule;

		s_rule = devm_kzalloc(ice_hw_to_dev(hw),
				      ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
				      GFP_KERNEL);
		if (!s_rule) {
			status = ICE_ERR_NO_MEMORY;
			goto exit;
		}

		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
				 ice_aqc_opc_remove_sw_rules);

		status = ice_aq_sw_rules(hw, s_rule,
					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);

		/* AQ buffer is no longer needed regardless of status */
		devm_kfree(ice_hw_to_dev(hw), s_rule);

		if (status)
			goto exit;

		/* Remove the bookkeeping entry from the filter list */
		list_del(&list_elem->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_elem);
	}
exit:
	mutex_unlock(rule_lock);
	return status;
}
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
/**
 * ice_add_mac - Add a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * Unicast MACs (when ucast_shared is false) are programmed in bulk via
 * a single allocated buffer; multicast (and shared unicast) entries go
 * through ice_add_rule_internal() one at a time.
 *
 * NOTE(review): the pre-check only searches already-installed rules, so
 * duplicate unicast addresses *within* @m_list are not detected — the
 * caller is expected to pass a de-duplicated list.
 */
enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
	struct ice_fltr_list_entry *m_list_itr;
	struct list_head *rule_head;
	u16 total_elem_left, s_rule_size;
	struct ice_switch_info *sw;
	struct mutex *rule_lock; /* protects the MAC recipe's filter list */
	enum ice_status status = 0;
	u16 num_unicast = 0;
	u8 elem_sent;

	if (!m_list || !hw)
		return ICE_ERR_PARAM;

	s_rule = NULL;
	sw = hw->switch_info;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	/* Pass 1: validate entries, count unicast MACs to bulk-add, and
	 * immediately add multicast/shared-unicast entries one by one.
	 */
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
		u16 vsi_handle;
		u16 hw_vsi_id;

		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
		vsi_handle = m_list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return ICE_ERR_PARAM;
		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
		/* update the src so it is the HW VSI number */
		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
			return ICE_ERR_PARAM;
		m_list_itr->fltr_info.src = hw_vsi_id;
		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
		    is_zero_ether_addr(add))
			return ICE_ERR_PARAM;
		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
			/* Don't overwrite an existing unicast filter */
			mutex_lock(rule_lock);
			if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
						&m_list_itr->fltr_info)) {
				mutex_unlock(rule_lock);
				return ICE_ERR_ALREADY_EXISTS;
			}
			mutex_unlock(rule_lock);
			num_unicast++;
		} else if (is_multicast_ether_addr(add) ||
			   (is_unicast_ether_addr(add) && hw->ucast_shared)) {
			m_list_itr->status =
				ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
						      m_list_itr);
			if (m_list_itr->status)
				return m_list_itr->status;
		}
	}

	mutex_lock(rule_lock);
	/* Exit if no suitable entries were found for adding bulk rules */
	if (!num_unicast) {
		status = 0;
		goto ice_add_mac_exit;
	}

	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	/* Allocate one contiguous buffer holding all unicast rules */
	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
			      GFP_KERNEL);
	if (!s_rule) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_add_mac_exit;
	}

	/* Pass 2: fill one switch rule per unicast MAC into the buffer */
	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];

		if (is_unicast_ether_addr(mac_addr)) {
			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
					 ice_aqc_opc_add_sw_rules);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

	/* Send the rules to FW in chunks bounded by ICE_AQ_MAX_BUF_LEN */
	r_iter = s_rule;

	for (total_elem_left = num_unicast; total_elem_left > 0;
	     total_elem_left -= elem_sent) {
		struct ice_aqc_sw_rules_elem *entry = r_iter;

		elem_sent = min_t(u8, total_elem_left,
				  (ICE_AQ_MAX_BUF_LEN / s_rule_size));
		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
					 elem_sent, ice_aqc_opc_add_sw_rules,
					 NULL);
		if (status)
			goto ice_add_mac_exit;
		r_iter = (struct ice_aqc_sw_rules_elem *)
			((u8 *)r_iter + (elem_sent * s_rule_size));
	}

	/* Pass 3: record the rule IDs returned by FW and create the
	 * bookkeeping entries on the recipe's filter list.
	 */
	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
		struct ice_fltr_mgmt_list_entry *fm_entry;

		if (is_unicast_ether_addr(mac_addr)) {
			f_info->fltr_rule_id =
				le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
			f_info->fltr_act = ICE_FWD_TO_VSI;
			/* Create an entry to track this MAC address */
			fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
						sizeof(*fm_entry), GFP_KERNEL);
			if (!fm_entry) {
				status = ICE_ERR_NO_MEMORY;
				goto ice_add_mac_exit;
			}
			fm_entry->fltr_info = *f_info;
			fm_entry->vsi_count = 1;

			/* The bookkeeping entries are removed when the
			 * corresponding remove-filter request is made.
			 */
			list_add(&fm_entry->list_entry, rule_head);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

ice_add_mac_exit:
	mutex_unlock(rule_lock);
	if (s_rule)
		devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}
1781
1782
1783
1784
1785
1786
/**
 * ice_add_vlan_internal - Add one VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @f_entry: filter entry containing one VLAN information
 *
 * VLAN rules always forward to a VSI list. Depending on whether a rule
 * for this VLAN already exists and how its VSI list is shared, this
 * either creates a new rule+list, extends the existing list, or moves
 * the rule onto a fresh two-entry list.
 */
static enum ice_status
ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *v_list_itr;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id = 0, vsi_handle;
	struct mutex *rule_lock; /* protects the VLAN recipe's filter list */
	enum ice_status status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;

	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
	new_fltr = &f_entry->fltr_info;

	/* VLAN ID must fit in 12 bits */
	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
		return ICE_ERR_PARAM;

	if (new_fltr->src_id != ICE_SRC_ID_VSI)
		return ICE_ERR_PARAM;

	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
	lkup_type = new_fltr->lkup_type;
	vsi_handle = new_fltr->vsi_handle;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	mutex_lock(rule_lock);
	v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
	if (!v_list_itr) {
		struct ice_vsi_list_map_info *map_info = NULL;

		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
			/* Look whether this VSI already belongs to a
			 * single-VSI list for the VLAN lookup type; if not,
			 * allocate a new VSI list for it.
			 */
			map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
							   vsi_handle,
							   &vsi_list_id);
			if (!map_info) {
				status = ice_create_vsi_list_rule(hw,
								  &vsi_handle,
								  1,
								  &vsi_list_id,
								  lkup_type);
				if (status)
					goto exit;
			}
			/* Convert the action to forward to a VSI list */
			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
		}

		status = ice_create_pkt_fwd_rule(hw, f_entry);
		if (!status) {
			v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
							 new_fltr);
			if (!v_list_itr) {
				status = ICE_ERR_DOES_NOT_EXIST;
				goto exit;
			}
			/* reuse the VSI list found above, else map the
			 * newly created one
			 */
			if (map_info) {
				v_list_itr->vsi_list_info = map_info;
				map_info->ref_cnt++;
			} else {
				v_list_itr->vsi_list_info =
					ice_create_vsi_list_map(hw, &vsi_handle,
								1, vsi_list_id);
			}
		}
	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
		/* Rule exists and we are the only user of its VSI list:
		 * simply add the new VSI to the existing list.
		 */
		cur_fltr = &v_list_itr->fltr_info;
		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
						 new_fltr);
	} else {
		/* Rule exists but its VSI list is shared with other VLAN
		 * rules; the rule must be re-pointed at a new list that
		 * contains the old member plus the new VSI.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];
		u16 cur_handle;

		/* Reusing a shared list that already has more than one
		 * VSI is an optimization that is not implemented yet.
		 */
		if (v_list_itr->vsi_count > 1 &&
		    v_list_itr->vsi_list_info->ref_cnt > 1) {
			ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
			status = ICE_ERR_CFG;
			goto exit;
		}

		cur_handle =
			find_first_bit(v_list_itr->vsi_list_info->vsi_map,
				       ICE_MAX_VSI);

		/* A rule already exists with the new VSI being added */
		if (cur_handle == vsi_handle) {
			status = ICE_ERR_ALREADY_EXISTS;
			goto exit;
		}

		vsi_handle_arr[0] = cur_handle;
		vsi_handle_arr[1] = vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id, lkup_type);
		if (status)
			goto exit;

		tmp_fltr = v_list_itr->fltr_info;
		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		/* Update the previous switch rule in FW to point at the
		 * newly created VSI list that includes the current VSI.
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			goto exit;

		/* before overriding the VSI list map info, decrement the
		 * ref_cnt of the previous (shared) VSI list
		 */
		v_list_itr->vsi_list_info->ref_cnt--;

		/* now update bookkeeping to the newly created list */
		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
		v_list_itr->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);
		v_list_itr->vsi_count++;
	}

exit:
	mutex_unlock(rule_lock);
	return status;
}
1934
1935
1936
1937
1938
1939
1940enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
1941{
1942 struct ice_fltr_list_entry *v_list_itr;
1943
1944 if (!v_list || !hw)
1945 return ICE_ERR_PARAM;
1946
1947 list_for_each_entry(v_list_itr, v_list, list_entry) {
1948 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
1949 return ICE_ERR_PARAM;
1950 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
1951 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
1952 if (v_list_itr->status)
1953 return v_list_itr->status;
1954 }
1955 return 0;
1956}
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967enum ice_status
1968ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
1969{
1970 struct ice_fltr_list_entry *em_list_itr;
1971
1972 if (!em_list || !hw)
1973 return ICE_ERR_PARAM;
1974
1975 list_for_each_entry(em_list_itr, em_list, list_entry) {
1976 enum ice_sw_lkup_type l_type =
1977 em_list_itr->fltr_info.lkup_type;
1978
1979 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
1980 l_type != ICE_SW_LKUP_ETHERTYPE)
1981 return ICE_ERR_PARAM;
1982
1983 em_list_itr->status = ice_add_rule_internal(hw, l_type,
1984 em_list_itr);
1985 if (em_list_itr->status)
1986 return em_list_itr->status;
1987 }
1988 return 0;
1989}
1990
1991
1992
1993
1994
1995
1996enum ice_status
1997ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
1998{
1999 struct ice_fltr_list_entry *em_list_itr, *tmp;
2000
2001 if (!em_list || !hw)
2002 return ICE_ERR_PARAM;
2003
2004 list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
2005 enum ice_sw_lkup_type l_type =
2006 em_list_itr->fltr_info.lkup_type;
2007
2008 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
2009 l_type != ICE_SW_LKUP_ETHERTYPE)
2010 return ICE_ERR_PARAM;
2011
2012 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
2013 em_list_itr);
2014 if (em_list_itr->status)
2015 return em_list_itr->status;
2016 }
2017 return 0;
2018}
2019
2020
2021
2022
2023
2024
2025static void
2026ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
2027{
2028 if (!list_empty(rule_head)) {
2029 struct ice_fltr_mgmt_list_entry *entry;
2030 struct ice_fltr_mgmt_list_entry *tmp;
2031
2032 list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
2033 list_del(&entry->list_entry);
2034 devm_kfree(ice_hw_to_dev(hw), entry);
2035 }
2036 }
2037}
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
/**
 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to set as default
 * @set: true to add the rule, false to remove it
 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
 *
 * Adds or removes the filter rule that makes the given VSI the default
 * VSI for the switch, and updates the default-VSI bookkeeping kept in
 * hw->port_info.
 */
enum ice_status
ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	struct ice_fltr_info f_info;
	enum ice_adminq_opc opcode;
	enum ice_status status;
	u16 s_rule_size;
	u16 hw_vsi_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	/* The add-rule buffer includes room for the Ethernet header
	 * payload (ICE_SW_RULE_RX_TX_ETH_HDR_SIZE); removal needs none.
	 */
	s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
			    ICE_SW_RULE_RX_TX_NO_HDR_SIZE;

	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;

	memset(&f_info, 0, sizeof(f_info));

	f_info.lkup_type = ICE_SW_LKUP_DFLT;
	f_info.flag = direction;
	f_info.fltr_act = ICE_FWD_TO_VSI;
	f_info.fwd_id.hw_vsi_id = hw_vsi_id;

	if (f_info.flag & ICE_FLTR_RX) {
		/* Rx rules source from the logical port; removal reuses
		 * the rule ID recorded when the rule was set.
		 */
		f_info.src = hw->port_info->lport;
		f_info.src_id = ICE_SRC_ID_LPORT;
		if (!set)
			f_info.fltr_rule_id =
				hw->port_info->dflt_rx_vsi_rule_id;
	} else if (f_info.flag & ICE_FLTR_TX) {
		/* Tx rules source from the HW VSI itself */
		f_info.src_id = ICE_SRC_ID_VSI;
		f_info.src = hw_vsi_id;
		if (!set)
			f_info.fltr_rule_id =
				hw->port_info->dflt_tx_vsi_rule_id;
	}

	if (set)
		opcode = ice_aqc_opc_add_sw_rules;
	else
		opcode = ice_aqc_opc_remove_sw_rules;

	ice_fill_sw_rule(hw, &f_info, s_rule, opcode);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
	if (status || !(f_info.flag & ICE_FLTR_TX_RX))
		goto out;
	/* Record (on set) or invalidate (on clear) the default-VSI
	 * number and rule ID in the port info.
	 */
	if (set) {
		u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);

		if (f_info.flag & ICE_FLTR_TX) {
			hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
			hw->port_info->dflt_tx_vsi_rule_id = index;
		} else if (f_info.flag & ICE_FLTR_RX) {
			hw->port_info->dflt_rx_vsi_num = hw_vsi_id;
			hw->port_info->dflt_rx_vsi_rule_id = index;
		}
	} else {
		if (f_info.flag & ICE_FLTR_TX) {
			hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
			hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
		} else if (f_info.flag & ICE_FLTR_RX) {
			hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
			hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
		}
	}

out:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138static struct ice_fltr_mgmt_list_entry *
2139ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
2140 struct ice_fltr_info *f_info)
2141{
2142 struct ice_switch_info *sw = hw->switch_info;
2143 struct ice_fltr_mgmt_list_entry *list_itr;
2144 struct list_head *list_head;
2145
2146 list_head = &sw->recp_list[recp_id].filt_rules;
2147 list_for_each_entry(list_itr, list_head, list_entry) {
2148 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2149 sizeof(f_info->l_data)) &&
2150 f_info->fwd_id.hw_vsi_id ==
2151 list_itr->fltr_info.fwd_id.hw_vsi_id &&
2152 f_info->flag == list_itr->fltr_info.flag)
2153 return list_itr;
2154 }
2155 return NULL;
2156}
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
2172{
2173 struct ice_fltr_list_entry *list_itr, *tmp;
2174 struct mutex *rule_lock;
2175
2176 if (!m_list)
2177 return ICE_ERR_PARAM;
2178
2179 rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2180 list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
2181 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
2182 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
2183 u16 vsi_handle;
2184
2185 if (l_type != ICE_SW_LKUP_MAC)
2186 return ICE_ERR_PARAM;
2187
2188 vsi_handle = list_itr->fltr_info.vsi_handle;
2189 if (!ice_is_vsi_valid(hw, vsi_handle))
2190 return ICE_ERR_PARAM;
2191
2192 list_itr->fltr_info.fwd_id.hw_vsi_id =
2193 ice_get_hw_vsi_num(hw, vsi_handle);
2194 if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
2195
2196
2197
2198
2199 mutex_lock(rule_lock);
2200 if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
2201 &list_itr->fltr_info)) {
2202 mutex_unlock(rule_lock);
2203 return ICE_ERR_DOES_NOT_EXIST;
2204 }
2205 mutex_unlock(rule_lock);
2206 }
2207 list_itr->status = ice_remove_rule_internal(hw,
2208 ICE_SW_LKUP_MAC,
2209 list_itr);
2210 if (list_itr->status)
2211 return list_itr->status;
2212 }
2213 return 0;
2214}
2215
2216
2217
2218
2219
2220
2221enum ice_status
2222ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
2223{
2224 struct ice_fltr_list_entry *v_list_itr, *tmp;
2225
2226 if (!v_list || !hw)
2227 return ICE_ERR_PARAM;
2228
2229 list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
2230 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
2231
2232 if (l_type != ICE_SW_LKUP_VLAN)
2233 return ICE_ERR_PARAM;
2234 v_list_itr->status = ice_remove_rule_internal(hw,
2235 ICE_SW_LKUP_VLAN,
2236 v_list_itr);
2237 if (v_list_itr->status)
2238 return v_list_itr->status;
2239 }
2240 return 0;
2241}
2242
2243
2244
2245
2246
2247
2248static bool
2249ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
2250{
2251 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
2252 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
2253 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
2254 fm_entry->vsi_list_info &&
2255 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
2256}
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271static enum ice_status
2272ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
2273 struct list_head *vsi_list_head,
2274 struct ice_fltr_info *fi)
2275{
2276 struct ice_fltr_list_entry *tmp;
2277
2278
2279
2280
2281 tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
2282 if (!tmp)
2283 return ICE_ERR_NO_MEMORY;
2284
2285 tmp->fltr_info = *fi;
2286
2287
2288
2289
2290
2291
2292 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
2293 tmp->fltr_info.vsi_handle = vsi_handle;
2294 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
2295
2296 list_add(&tmp->list_entry, vsi_list_head);
2297
2298 return 0;
2299}
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314static enum ice_status
2315ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
2316 struct list_head *lkup_list_head,
2317 struct list_head *vsi_list_head)
2318{
2319 struct ice_fltr_mgmt_list_entry *fm_entry;
2320 enum ice_status status = 0;
2321
2322
2323 if (!ice_is_vsi_valid(hw, vsi_handle))
2324 return ICE_ERR_PARAM;
2325
2326 list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
2327 if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
2328 continue;
2329
2330 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
2331 vsi_list_head,
2332 &fm_entry->fltr_info);
2333 if (status)
2334 return status;
2335 }
2336 return status;
2337}
2338
2339
2340
2341
2342
2343
2344
2345
2346static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
2347{
2348 u16 vid = fi->l_data.mac_vlan.vlan_id;
2349 u8 *macaddr = fi->l_data.mac.mac_addr;
2350 bool is_tx_fltr = false;
2351 u8 promisc_mask = 0;
2352
2353 if (fi->flag == ICE_FLTR_TX)
2354 is_tx_fltr = true;
2355
2356 if (is_broadcast_ether_addr(macaddr))
2357 promisc_mask |= is_tx_fltr ?
2358 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
2359 else if (is_multicast_ether_addr(macaddr))
2360 promisc_mask |= is_tx_fltr ?
2361 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
2362 else if (is_unicast_ether_addr(macaddr))
2363 promisc_mask |= is_tx_fltr ?
2364 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
2365 if (vid)
2366 promisc_mask |= is_tx_fltr ?
2367 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
2368
2369 return promisc_mask;
2370}
2371
2372
2373
2374
2375
2376
2377
2378static enum ice_status
2379ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
2380 struct list_head *v_list)
2381{
2382 struct ice_fltr_list_entry *v_list_itr, *tmp;
2383
2384 list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
2385 v_list_itr->status =
2386 ice_remove_rule_internal(hw, recp_id, v_list_itr);
2387 if (v_list_itr->status)
2388 return v_list_itr->status;
2389 }
2390 return 0;
2391}
2392
2393
2394
2395
2396
2397
2398
2399
/**
 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to clear mode
 * @promisc_mask: mask of promiscuous config bits to clear
 * @vid: VLAN ID to clear VLAN promiscuous
 */
enum ice_status
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
		      u16 vid)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry, *tmp;
	struct list_head remove_list_head;
	struct ice_fltr_mgmt_list_entry *itr;
	struct list_head *rule_head;
	struct mutex *rule_lock; /* protects the recipe's filter rule list */
	enum ice_status status = 0;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* VLAN promiscuous bits live under the PROMISC_VLAN recipe */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	else
		recipe_id = ICE_SW_LKUP_PROMISC;

	rule_head = &sw->recp_list[recipe_id].filt_rules;
	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;

	INIT_LIST_HEAD(&remove_list_head);

	/* Collect matching rules under the lock; actual removal happens
	 * after the lock is dropped (the remove path relocks).
	 */
	mutex_lock(rule_lock);
	list_for_each_entry(itr, rule_head, list_entry) {
		struct ice_fltr_info *fltr_info;
		u8 fltr_promisc_mask = 0;

		if (!ice_vsi_uses_fltr(itr, vsi_handle))
			continue;
		fltr_info = &itr->fltr_info;

		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
		    vid != fltr_info->l_data.mac_vlan.vlan_id)
			continue;

		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);

		/* Skip if filter is not completely specified by given mask */
		if (fltr_promisc_mask & ~promisc_mask)
			continue;

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
							&remove_list_head,
							fltr_info);
		if (status) {
			mutex_unlock(rule_lock);
			goto free_fltr_list;
		}
	}
	mutex_unlock(rule_lock);

	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);

free_fltr_list:
	/* Free the temporary per-VSI copies regardless of outcome */
	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
	}

	return status;
}
2465
2466
2467
2468
2469
2470
2471
2472
/**
 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @vid: VLAN ID to set VLAN promiscuous
 */
enum ice_status
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
{
	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
	struct ice_fltr_list_entry f_list_entry;
	struct ice_fltr_info new_fltr;
	enum ice_status status = 0;
	bool is_tx_fltr;
	u16 hw_vsi_id;
	int pkt_type;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	memset(&new_fltr, 0, sizeof(new_fltr));

	/* VLAN promiscuous bits are programmed via the PROMISC_VLAN recipe */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
		new_fltr.l_data.mac_vlan.vlan_id = vid;
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	} else {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
		recipe_id = ICE_SW_LKUP_PROMISC;
	}

	/* A separate filter is programmed for each packet-type/direction
	 * combination: each loop iteration consumes one bit (plus a VLAN
	 * direction bit) from promisc_mask until the mask is empty.
	 */
	while (promisc_mask) {
		u8 *mac_addr;

		pkt_type = 0;
		is_tx_fltr = false;

		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
			pkt_type = UCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
			pkt_type = UCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
			pkt_type = MCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
			pkt_type = MCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
			pkt_type = BCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
			pkt_type = BCAST_FLTR;
			is_tx_fltr = true;
		}

		/* Check for VLAN promiscuous flag */
		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
			is_tx_fltr = true;
		}

		/* Set filter DA based on packet type */
		mac_addr = new_fltr.l_data.mac.mac_addr;
		if (pkt_type == BCAST_FLTR) {
			eth_broadcast_addr(mac_addr);
		} else if (pkt_type == MCAST_FLTR ||
			   pkt_type == UCAST_FLTR) {
			/* start from the dummy header DA; set the group
			 * bit for the multicast case
			 */
			ether_addr_copy(mac_addr, dummy_eth_header);
			if (pkt_type == MCAST_FLTR)
				mac_addr[0] |= 0x1;
		}

		/* Need to reset this to zero for each iteration */
		new_fltr.flag = 0;
		if (is_tx_fltr) {
			new_fltr.flag |= ICE_FLTR_TX;
			new_fltr.src = hw_vsi_id;
		} else {
			new_fltr.flag |= ICE_FLTR_RX;
			new_fltr.src = hw->port_info->lport;
		}

		new_fltr.fltr_act = ICE_FWD_TO_VSI;
		new_fltr.vsi_handle = vsi_handle;
		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
		f_list_entry.fltr_info = new_fltr;

		status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
		if (status)
			goto set_promisc_exit;
	}

set_promisc_exit:
	return status;
}
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
/**
 * ice_set_vlan_vsi_promisc - set/clear VLAN promiscuous for a VSI's VLANs
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @rm_vlan_promisc: true to clear, false to set, for each VLAN on the VSI
 *
 * Applies (or clears) the given promiscuous mode(s) for every VLAN
 * currently associated with the VSI.
 */
enum ice_status
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
			 bool rm_vlan_promisc)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct list_head vsi_list_head;
	struct list_head *vlan_head;
	struct mutex *vlan_lock; /* protects the VLAN recipe's filter list */
	enum ice_status status;
	u16 vlan_id;

	INIT_LIST_HEAD(&vsi_list_head);
	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	mutex_lock(vlan_lock);
	/* Snapshot the VSI's VLAN filters under the lock ... */
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
					  &vsi_list_head);
	mutex_unlock(vlan_lock);
	if (status)
		goto free_fltr_list;

	/* ... then set/clear promiscuous mode per collected VLAN ID */
	list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
		if (rm_vlan_promisc)
			status = ice_clear_vsi_promisc(hw, vsi_handle,
						       promisc_mask, vlan_id);
		else
			status = ice_set_vsi_promisc(hw, vsi_handle,
						     promisc_mask, vlan_id);
		if (status)
			break;
	}

free_fltr_list:
	/* Free the temporary per-VSI filter copies */
	list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
		list_del(&list_itr->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_itr);
	}
	return status;
}
2628
2629
2630
2631
2632
2633
2634
/**
 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @lkup: switch rule filter lookup type
 *
 * Best-effort cleanup: the statuses of the per-type remove helpers are
 * intentionally ignored (the function returns void).
 */
static void
ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
			 enum ice_sw_lkup_type lkup)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry;
	struct list_head remove_list_head;
	struct list_head *rule_head;
	struct ice_fltr_list_entry *tmp;
	struct mutex *rule_lock; /* protects this recipe's filter rule list */
	enum ice_status status;

	INIT_LIST_HEAD(&remove_list_head);
	rule_lock = &sw->recp_list[lkup].filt_rule_lock;
	rule_head = &sw->recp_list[lkup].filt_rules;
	mutex_lock(rule_lock);
	/* Snapshot the matching filters under the lock, then remove them
	 * after dropping it (the remove helpers take the lock themselves).
	 */
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
					  &remove_list_head);
	mutex_unlock(rule_lock);
	if (status)
		goto free_fltr_list;

	switch (lkup) {
	case ICE_SW_LKUP_MAC:
		ice_remove_mac(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_VLAN:
		ice_remove_vlan(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_PROMISC:
	case ICE_SW_LKUP_PROMISC_VLAN:
		ice_remove_promisc(hw, lkup, &remove_list_head);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
	case ICE_SW_LKUP_ETHERTYPE:
	case ICE_SW_LKUP_ETHERTYPE_MAC:
	case ICE_SW_LKUP_DFLT:
	case ICE_SW_LKUP_LAST:
	default:
		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
		break;
	}

free_fltr_list:
	/* Free the temporary per-VSI filter copies */
	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
	}
}
2684
2685
2686
2687
2688
2689
2690void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
2691{
2692 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
2693 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
2694 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
2695 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
2696 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
2697 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
2698 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
2699 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
2700}
2701
2702
2703
2704
2705
2706
2707
2708
2709
2710enum ice_status
2711ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
2712 u16 *counter_id)
2713{
2714 struct ice_aqc_alloc_free_res_elem *buf;
2715 enum ice_status status;
2716 u16 buf_len;
2717
2718
2719 buf_len = struct_size(buf, elem, 1);
2720 buf = kzalloc(buf_len, GFP_KERNEL);
2721 if (!buf)
2722 return ICE_ERR_NO_MEMORY;
2723
2724 buf->num_elems = cpu_to_le16(num_items);
2725 buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
2726 ICE_AQC_RES_TYPE_M) | alloc_shared);
2727
2728 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
2729 ice_aqc_opc_alloc_res, NULL);
2730 if (status)
2731 goto exit;
2732
2733 *counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);
2734
2735exit:
2736 kfree(buf);
2737 return status;
2738}
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748enum ice_status
2749ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
2750 u16 counter_id)
2751{
2752 struct ice_aqc_alloc_free_res_elem *buf;
2753 enum ice_status status;
2754 u16 buf_len;
2755
2756
2757 buf_len = struct_size(buf, elem, 1);
2758 buf = kzalloc(buf_len, GFP_KERNEL);
2759 if (!buf)
2760 return ICE_ERR_NO_MEMORY;
2761
2762 buf->num_elems = cpu_to_le16(num_items);
2763 buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
2764 ICE_AQC_RES_TYPE_M) | alloc_shared);
2765 buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);
2766
2767 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
2768 ice_aqc_opc_free_res, NULL);
2769 if (status)
2770 ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
2771
2772 kfree(buf);
2773 return status;
2774}
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
/**
 * ice_replay_vsi_fltr - Replay filters for the requested VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: driver VSI handle
 * @recp_id: recipe ID (lookup type) whose rules are to be replayed
 * @list_head: list of saved replay rules for this recipe
 *
 * Re-adds each saved rule that references @vsi_handle. Returns 0 on success
 * or the first failing status from the add helpers.
 */
static enum ice_status
ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
		    struct list_head *list_head)
{
	struct ice_fltr_mgmt_list_entry *itr;
	enum ice_status status = 0;
	u16 hw_vsi_id;

	/* Nothing saved for this recipe — nothing to replay */
	if (list_empty(list_head))
		return status;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	list_for_each_entry(itr, list_head, list_entry) {
		struct ice_fltr_list_entry f_entry;

		f_entry.fltr_info = itr->fltr_info;
		/* Single-VSI (non-VLAN) rule owned by this VSI: re-add it
		 * directly as an individual rule.
		 */
		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
		    itr->fltr_info.vsi_handle == vsi_handle) {
			/* update the src in case it is VSI num */
			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
				f_entry.fltr_info.src = hw_vsi_id;
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
			if (status)
				goto end;
			continue;
		}
		/* Otherwise the rule uses a VSI list; skip it unless this VSI
		 * is a member of that list.
		 */
		if (!itr->vsi_list_info ||
		    !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
			continue;
		/* Clear the membership bit so the add path below re-adds this
		 * VSI back into the list rule.
		 */
		clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
		f_entry.fltr_info.vsi_handle = vsi_handle;
		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
		/* update the src in case it is VSI num */
		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
			f_entry.fltr_info.src = hw_vsi_id;
		/* VLAN rules have a dedicated add path */
		if (recp_id == ICE_SW_LKUP_VLAN)
			status = ice_add_vlan_internal(hw, &f_entry);
		else
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
		if (status)
			goto end;
	}
end:
	return status;
}
2832
2833
2834
2835
2836
2837
2838
2839
2840enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
2841{
2842 struct ice_switch_info *sw = hw->switch_info;
2843 enum ice_status status = 0;
2844 u8 i;
2845
2846 for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
2847 struct list_head *head;
2848
2849 head = &sw->recp_list[i].filt_replay_rules;
2850 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
2851 if (status)
2852 return status;
2853 }
2854 return status;
2855}
2856
2857
2858
2859
2860
2861
2862
2863void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
2864{
2865 struct ice_switch_info *sw = hw->switch_info;
2866 u8 i;
2867
2868 if (!sw)
2869 return;
2870
2871 for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
2872 if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
2873 struct list_head *l_head;
2874
2875 l_head = &sw->recp_list[i].filt_replay_rules;
2876 ice_rem_sw_rule_info(hw, l_head);
2877 }
2878 }
2879}
2880