1
2
3
4#include "ice_switch.h"
5
/* Byte offsets of fields within the dummy Ethernet header below */
#define ICE_ETH_DA_OFFSET 0
#define ICE_ETH_ETHTYPE_OFFSET 12
#define ICE_ETH_VLAN_TCI_OFFSET 14
/* VLAN IDs are 12 bits wide */
#define ICE_MAX_VLAN_ID 0xFFF

/* Dummy Ethernet header used as the lookup payload when programming switch
 * filter rules: { DA (6 bytes), SA (6 bytes), Ethertype/VLAN tag (4 bytes) }.
 *
 * Hardcoded values:
 * byte 0 = 0x2: locally administered DA MAC
 * byte 6 = 0x2: locally administered SA MAC
 * bytes 12-13 = 0x8100: for a VLAN filter these hold the 802.1Q ethertype
 *	(bytes 14-15 are the placeholder for the VLAN TCI); for an ethertype
 *	filter bytes 12-13 are overwritten with the requested ethertype
 *	(see ice_fill_sw_rule()).
 */
#define DUMMY_ETH_HDR_LEN 16
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
							0x2, 0, 0, 0, 0, 0,
							0x81, 0, 0, 0};

/* Sizes of variable-length switch rule buffers. Each is the AQ element
 * header (minus the pdata placeholder) plus the specific rule struct; the
 * "- 1" terms account for the one-byte flexible tail declared inside the
 * rule structs — TODO confirm against ice_adminq_cmd.h declarations.
 */
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lg_act) - \
	 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
	 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_vsi_list) - \
	 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
	 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
51
52
53
54
55
56
57
58
59
60
61
62
63static enum ice_status
64ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
65 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
66 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
67{
68 struct ice_aqc_alloc_free_res_cmd *cmd;
69 struct ice_aq_desc desc;
70
71 cmd = &desc.params.sw_res_ctrl;
72
73 if (!buf)
74 return ICE_ERR_PARAM;
75
76 if (buf_size < (num_entries * sizeof(buf->elem[0])))
77 return ICE_ERR_PARAM;
78
79 ice_fill_dflt_direct_cmd_desc(&desc, opc);
80
81 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
82
83 cmd->num_entries = cpu_to_le16(num_entries);
84
85 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
86}
87
88
89
90
91
92
93
94
95enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
96{
97 struct ice_sw_recipe *recps;
98 u8 i;
99
100 recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
101 sizeof(*recps), GFP_KERNEL);
102 if (!recps)
103 return ICE_ERR_NO_MEMORY;
104
105 for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
106 recps[i].root_rid = i;
107 INIT_LIST_HEAD(&recps[i].filt_rules);
108 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
109 mutex_init(&recps[i].filt_rule_lock);
110 }
111
112 hw->switch_info->recp_list = recps;
113
114 return 0;
115}
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142static enum ice_status
143ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
144 u16 buf_size, u16 *req_desc, u16 *num_elems,
145 struct ice_sq_cd *cd)
146{
147 struct ice_aqc_get_sw_cfg *cmd;
148 enum ice_status status;
149 struct ice_aq_desc desc;
150
151 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
152 cmd = &desc.params.get_sw_conf;
153 cmd->element = cpu_to_le16(*req_desc);
154
155 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
156 if (!status) {
157 *req_desc = le16_to_cpu(cmd->element);
158 *num_elems = le16_to_cpu(cmd->num_elems);
159 }
160
161 return status;
162}
163
164
165
166
167
168
169
170
171
/**
 * ice_aq_add_vsi - add a VSI context to hardware
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Issues the add-VSI admin queue command, sending vsi_ctx->info as the
 * indirect buffer. On success, updates @vsi_ctx with the firmware-assigned
 * VSI number and current pool usage counts.
 */
static enum ice_status
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *res;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	/* cmd and res alias the same descriptor params area (union) */
	cmd = &desc.params.vsi_cmd;
	res = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);

	/* If the caller pre-assigned a VSI number (not allocated from the
	 * FW pool), pass it down flagged as valid.
	 */
	if (!vsi_ctx->alloc_from_pool)
		cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
					   ICE_AQ_VSI_IS_VALID);
	cmd->vf_id = vsi_ctx->vf_num;

	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);

	/* indirect buffer is read by firmware */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);

	if (!status) {
		vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
		vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
	}

	return status;
}
206
207
208
209
210
211
212
213
214
215
/**
 * ice_aq_free_vsi - free a VSI context in hardware
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep the VSI allocation in FW (only the HW VSI is freed)
 * @cd: pointer to command details structure or NULL
 *
 * Issues the free-VSI admin queue command. On success, updates @vsi_ctx
 * with the firmware's current VSI pool usage counts.
 */
static enum ice_status
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	/* cmd and resp alias the same descriptor params area (union) */
	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);

	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
	if (keep_vsi_alloc)
		cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
	}

	return status;
}
242
243
244
245
246
247
248
249
250
251static enum ice_status
252ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
253 struct ice_sq_cd *cd)
254{
255 struct ice_aqc_add_update_free_vsi_resp *resp;
256 struct ice_aqc_add_get_update_free_vsi *cmd;
257 struct ice_aq_desc desc;
258 enum ice_status status;
259
260 cmd = &desc.params.vsi_cmd;
261 resp = &desc.params.add_update_free_vsi_res;
262
263 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
264
265 cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
266
267 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
268
269 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
270 sizeof(vsi_ctx->info), cd);
271
272 if (!status) {
273 vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
274 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
275 }
276
277 return status;
278}
279
280
281
282
283
284
285
286
287bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
288{
289 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
290}
291
292
293
294
295
296
297
298
299
/**
 * ice_get_hw_vsi_num - return the HW VSI number for a handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * NOTE: the caller must validate @vsi_handle first (ice_is_vsi_valid());
 * this helper dereferences hw->vsi_ctx[vsi_handle] unconditionally.
 */
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
{
	return hw->vsi_ctx[vsi_handle]->vsi_num;
}
304
305
306
307
308
309
310
311
312struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
313{
314 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
315}
316
317
318
319
320
321
322
323
324
/**
 * ice_save_vsi_ctx - save the VSI context for a given handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle (caller must ensure it is < ICE_MAX_VSI)
 * @vsi: VSI context pointer to store; ownership passes to the table and the
 *	 context is later freed by ice_clear_vsi_ctx()
 */
static void
ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
{
	hw->vsi_ctx[vsi_handle] = vsi;
}
330
331
332
333
334
335
336static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
337{
338 struct ice_vsi_ctx *vsi;
339 u8 i;
340
341 vsi = ice_get_vsi_ctx(hw, vsi_handle);
342 if (!vsi)
343 return;
344 ice_for_each_traffic_class(i) {
345 if (vsi->lan_q_ctx[i]) {
346 devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
347 vsi->lan_q_ctx[i] = NULL;
348 }
349 }
350}
351
352
353
354
355
356
357
358
359static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
360{
361 struct ice_vsi_ctx *vsi;
362
363 vsi = ice_get_vsi_ctx(hw, vsi_handle);
364 if (vsi) {
365 ice_clear_vsi_q_ctx(hw, vsi_handle);
366 devm_kfree(ice_hw_to_dev(hw), vsi);
367 hw->vsi_ctx[vsi_handle] = NULL;
368 }
369}
370
371
372
373
374
/**
 * ice_clear_all_vsi_ctx - free every saved VSI context
 * @hw: pointer to the HW struct
 */
void ice_clear_all_vsi_ctx(struct ice_hw *hw)
{
	u16 i;

	for (i = 0; i < ICE_MAX_VSI; i++)
		ice_clear_vsi_ctx(hw, i);
}
382
383
384
385
386
387
388
389
390
391
392
393
/**
 * ice_add_vsi - add a VSI in hardware and save its context
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle chosen by the driver
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Adds the VSI via the admin queue, then records the context under
 * @vsi_handle. If the handle already has a saved context, only the HW VSI
 * number is refreshed.
 */
enum ice_status
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	    struct ice_sq_cd *cd)
{
	struct ice_vsi_ctx *tmp_vsi_ctx;
	enum ice_status status;

	if (vsi_handle >= ICE_MAX_VSI)
		return ICE_ERR_PARAM;
	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
	if (status)
		return status;
	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!tmp_vsi_ctx) {
		/* First use of this handle: save a private copy of the ctx */
		tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
					   sizeof(*tmp_vsi_ctx), GFP_KERNEL);
		if (!tmp_vsi_ctx) {
			/* roll back the HW allocation on OOM */
			ice_aq_free_vsi(hw, vsi_ctx, false, cd);
			return ICE_ERR_NO_MEMORY;
		}
		*tmp_vsi_ctx = *vsi_ctx;
		ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
	} else {
		/* Handle already known: just sync the HW VSI number */
		if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num)
			tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
	}

	return 0;
}
425
426
427
428
429
430
431
432
433
434
435
436enum ice_status
437ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
438 bool keep_vsi_alloc, struct ice_sq_cd *cd)
439{
440 enum ice_status status;
441
442 if (!ice_is_vsi_valid(hw, vsi_handle))
443 return ICE_ERR_PARAM;
444 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
445 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
446 if (!status)
447 ice_clear_vsi_ctx(hw, vsi_handle);
448 return status;
449}
450
451
452
453
454
455
456
457
458
459
460enum ice_status
461ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
462 struct ice_sq_cd *cd)
463{
464 if (!ice_is_vsi_valid(hw, vsi_handle))
465 return ICE_ERR_PARAM;
466 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
467 return ice_aq_update_vsi(hw, vsi_ctx, cd);
468}
469
470
471
472
473
474
475
476
477
478
479static enum ice_status
480ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
481 enum ice_sw_lkup_type lkup_type,
482 enum ice_adminq_opc opc)
483{
484 struct ice_aqc_alloc_free_res_elem *sw_buf;
485 struct ice_aqc_res_elem *vsi_ele;
486 enum ice_status status;
487 u16 buf_len;
488
489 buf_len = sizeof(*sw_buf);
490 sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
491 if (!sw_buf)
492 return ICE_ERR_NO_MEMORY;
493 sw_buf->num_elems = cpu_to_le16(1);
494
495 if (lkup_type == ICE_SW_LKUP_MAC ||
496 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
497 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
498 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
499 lkup_type == ICE_SW_LKUP_PROMISC ||
500 lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
501 sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
502 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
503 sw_buf->res_type =
504 cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
505 } else {
506 status = ICE_ERR_PARAM;
507 goto ice_aq_alloc_free_vsi_list_exit;
508 }
509
510 if (opc == ice_aqc_opc_free_res)
511 sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
512
513 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
514 if (status)
515 goto ice_aq_alloc_free_vsi_list_exit;
516
517 if (opc == ice_aqc_opc_alloc_res) {
518 vsi_ele = &sw_buf->elem[0];
519 *vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
520 }
521
522ice_aq_alloc_free_vsi_list_exit:
523 devm_kfree(ice_hw_to_dev(hw), sw_buf);
524 return status;
525}
526
527
528
529
530
531
532
533
534
535
536
537
538static enum ice_status
539ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
540 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
541{
542 struct ice_aq_desc desc;
543
544 if (opc != ice_aqc_opc_add_sw_rules &&
545 opc != ice_aqc_opc_update_sw_rules &&
546 opc != ice_aqc_opc_remove_sw_rules)
547 return ICE_ERR_PARAM;
548
549 ice_fill_dflt_direct_cmd_desc(&desc, opc);
550
551 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
552 desc.params.sw_rules.num_rules_fltr_entry_index =
553 cpu_to_le16(num_rules);
554 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
555}
556
557
558
559
560
561
562
563
564
/**
 * ice_init_port_info - initialize port_info from a switch config element
 * @pi: pointer to the port_info structure to fill
 * @vsi_port_num: VSI/port number (masked to the logical port field)
 * @type: element type from the get-switch-config response
 * @swid: switch ID associated with the port
 * @pf_vf_num: PF or VF number owning the port
 * @is_vf: true if the element belongs to a VF
 *
 * Only physical-port elements are handled; any other type is logged and
 * ignored.
 */
static void
ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
		   u16 swid, u16 pf_vf_num, bool is_vf)
{
	switch (type) {
	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
		pi->sw_id = swid;
		pi->pf_vf_num = pf_vf_num;
		pi->is_vf = is_vf;
		/* no default Tx/Rx VSI until one is explicitly configured */
		pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
		pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
		break;
	default:
		ice_debug(pi->hw, ICE_DBG_SW,
			  "incorrect VSI/port type received\n");
		break;
	}
}
584
585
586
587
/**
 * ice_get_initial_sw_cfg - read the initial switch configuration from FW
 * @hw: pointer to the HW struct
 *
 * Pages through the get-switch-config responses and initializes
 * hw->port_info for each physical-port element found. VSI elements are
 * skipped here.
 */
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
{
	struct ice_aqc_get_sw_cfg_resp *rbuf;
	enum ice_status status;
	u16 req_desc = 0;
	u16 num_elems;
	u16 i;

	rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
			    GFP_KERNEL);

	if (!rbuf)
		return ICE_ERR_NO_MEMORY;

	/* Multiple calls to ice_aq_get_sw_cfg may be required to get the
	 * whole configuration: a non-zero req_desc written back by the
	 * command indicates more data is available.
	 */
	do {
		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
					   &req_desc, &num_elems, NULL);

		if (status)
			break;

		for (i = 0; i < num_elems; i++) {
			struct ice_aqc_get_sw_cfg_resp_elem *ele;
			u16 pf_vf_num, swid, vsi_port_num;
			bool is_vf = false;
			u8 type;

			/* NOTE(review): indexing rbuf[i] and taking its first
			 * element assumes each response struct spans exactly
			 * one element — confirm against the AQ struct layout.
			 */
			ele = rbuf[i].elements;
			vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;

			pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;

			swid = le16_to_cpu(ele->swid);

			if (le16_to_cpu(ele->pf_vf_num) &
			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
				is_vf = true;

			type = le16_to_cpu(ele->vsi_port_num) >>
				ICE_AQC_GET_SW_CONF_RESP_TYPE_S;

			if (type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
				/* VSI elements are not handled here */
				continue;
			}

			ice_init_port_info(hw->port_info, vsi_port_num,
					   type, swid, pf_vf_num, is_vf);
		}
	} while (req_desc && !status);

	devm_kfree(ice_hw_to_dev(hw), (void *)rbuf);
	return status;
}
649
650
651
652
653
654
655
656
657
658
/**
 * ice_fill_sw_info - set loopback/LAN enable bits for a filter
 * @hw: pointer to the HW struct
 * @fi: filter info to update (lb_en and lan_en are written)
 *
 * Decides whether packets matching a Tx-direction forwarding filter should
 * be looped back through the internal switch and/or sent out to the LAN.
 */
static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
{
	fi->lb_en = false;
	fi->lan_en = false;
	if ((fi->flag & ICE_FLTR_TX) &&
	    (fi->fltr_act == ICE_FWD_TO_VSI ||
	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
	     fi->fltr_act == ICE_FWD_TO_Q ||
	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
		/* Loopback is enabled for everything except VLAN (prune)
		 * filters — presumably looping back pruned traffic would
		 * replicate packets that the switch then drops; TODO confirm.
		 */
		if (fi->lkup_type != ICE_SW_LKUP_VLAN)
			fi->lb_en = true;

		/* LAN enable policy:
		 * - In VEB mode, only enable LAN forwarding for lookup types
		 *   the internal switch does not resolve locally: ethertype,
		 *   promiscuous, default and VLAN lookups, and MAC /
		 *   MAC+VLAN lookups on non-unicast addresses.
		 * - Otherwise (non-VEB, e.g. VEPA), always forward to LAN.
		 */
		if (hw->evb_veb) {
			if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
			    fi->lkup_type == ICE_SW_LKUP_PROMISC ||
			    fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
			    fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
			    fi->lkup_type == ICE_SW_LKUP_DFLT ||
			    fi->lkup_type == ICE_SW_LKUP_VLAN ||
			    (fi->lkup_type == ICE_SW_LKUP_MAC &&
			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
			    (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
				fi->lan_en = true;
		} else {
			fi->lan_en = true;
		}
	}
}
707
708
709
710
711
712
713
714
/**
 * ice_fill_sw_rule - fill a switch rule element from filter info
 * @hw: pointer to the HW struct
 * @f_info: filter information used to build the rule
 * @s_rule: switch rule element to fill
 * @opc: add/update/remove opcode — removal only needs the rule index
 */
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
{
	u16 vlan_id = ICE_MAX_VLAN_ID + 1;	/* sentinel: no VLAN to program */
	void *daddr = NULL;
	u16 eth_hdr_sz;
	u8 *eth_hdr;
	u32 act = 0;
	__be16 *off;
	u8 q_rgn;

	if (opc == ice_aqc_opc_remove_sw_rules) {
		/* Removal identifies the rule by index only */
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			cpu_to_le16(f_info->fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		return;
	}

	eth_hdr_sz = sizeof(dummy_eth_header);
	eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;

	/* initialize the ether header with a dummy header */
	memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
	ice_fill_sw_info(hw, f_info);

	/* Build the action word from the forwarding action */
	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
			ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_QGRP:
		/* queue group size is programmed as log2 of the group size */
		q_rgn = f_info->qgrp_size > 0 ?
			(u8)ilog2(f_info->qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
		break;
	default:
		/* unsupported action: leave the rule unprogrammed */
		return;
	}

	if (f_info->lb_en)
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	if (f_info->lan_en)
		act |= ICE_SINGLE_ACT_LAN_ENABLE;

	/* Collect the header fields to patch into the dummy ether header */
	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
		break;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
		}
		break;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
		/* fall-through: also program the ethertype */
	case ICE_SW_LKUP_ETHERTYPE:
		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		break;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		/* fall-through: also program the MAC address */
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		break;
	default:
		break;
	}

	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);

	/* recipe ID mirrors the lookup type for the default recipes */
	s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type);
	s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src);
	s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);

	if (daddr)
		ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);

	/* vlan_id still holds the sentinel if no VLAN was requested */
	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
		*off = cpu_to_be16(vlan_id);
	}

	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz);
}
840
841
842
843
844
845
846
847
848
849
850
/**
 * ice_add_marker_act - attach a large (marker) action to a MAC rule
 * @hw: pointer to the HW struct
 * @m_ent: management entry of the rule to attach the marker to
 * @sw_marker: software marker value to report in the Rx descriptor
 * @l_id: large action resource ID to use
 *
 * Builds a large-action rule carrying the forwarding action plus the
 * marker, and updates the existing lookup rule to point at it. Both rules
 * are submitted back-to-back in a single buffer.
 */
static enum ice_status
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
	/* For software marker we need 3 large actions:
	 * 1. FWD action: VSI or VSI-list forwarding
	 * 2. GENERIC VALUE action to hold the profile ID
	 * 3. GENERIC VALUE action to hold the software marker ID
	 */
	const u16 num_lg_acts = 3;
	enum ice_status status;
	u16 lg_act_size;
	u16 rules_size;
	u32 act;
	u16 id;

	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	/* Create two back-to-back switch rules and submit them to the HW
	 * using one memory buffer:
	 *    1. Large Action
	 *    2. Look up Tx Rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
	if (!lg_act)
		return ICE_ERR_NO_MEMORY;

	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule, i.e. large action */
	lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = cpu_to_le16(l_id);
	lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts);

	/* First action: VSI forwarding — use the VSI list ID when the rule
	 * already forwards to more than one VSI, otherwise the HW VSI ID.
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
		ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);

	/* Second action: generic value */
	act = ICE_LG_ACT_GENERIC;
	/* NOTE(review): value 1 here appears to be a fixed profile/index —
	 * confirm against the large-action descriptor definition.
	 */
	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);

	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;

	/* Third action: the marker value itself */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;

	lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);

	/* Second switch rule: regenerate the existing lookup rule */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* ... but override its action to point at the large action */
	rx_tx->pdata.lkup_tx_rx.act =
		cpu_to_le32(ICE_SINGLE_ACT_PTR |
			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
			     ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action.
	 */
	rx_tx->pdata.lkup_tx_rx.index =
		cpu_to_le16(m_ent->fltr_info.fltr_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	}

	devm_kfree(ice_hw_to_dev(hw), lg_act);
	return status;
}
945
946
947
948
949
950
951
952
953
954
955
956static struct ice_vsi_list_map_info *
957ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
958 u16 vsi_list_id)
959{
960 struct ice_switch_info *sw = hw->switch_info;
961 struct ice_vsi_list_map_info *v_map;
962 int i;
963
964 v_map = devm_kcalloc(ice_hw_to_dev(hw), 1, sizeof(*v_map), GFP_KERNEL);
965 if (!v_map)
966 return NULL;
967
968 v_map->vsi_list_id = vsi_list_id;
969 v_map->ref_cnt = 1;
970 for (i = 0; i < num_vsi; i++)
971 set_bit(vsi_handle_arr[i], v_map->vsi_map);
972
973 list_add(&v_map->list_entry, &sw->vsi_list_map_head);
974 return v_map;
975}
976
977
978
979
980
981
982
983
984
985
986
987
988
989
/**
 * ice_update_vsi_list_rule - add/remove VSIs to/from a VSI list rule
 * @hw: pointer to the HW struct
 * @vsi_handle_arr: array of VSI handles to add or remove
 * @num_vsi: number of handles in @vsi_handle_arr
 * @vsi_list_id: hardware VSI list to modify
 * @remove: true to clear the VSIs from the list, false to set them
 * @opc: switch-rules opcode to issue
 * @lkup_type: lookup type, selects replication vs. pruning list rule type
 */
static enum ice_status
ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
			 enum ice_sw_lkup_type lkup_type)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_status status;
	u16 s_rule_size;
	u16 type;
	int i;

	if (!num_vsi)
		return ICE_ERR_PARAM;

	/* Map the lookup type to the proper set/clear rule type */
	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
		type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
	else if (lkup_type == ICE_SW_LKUP_VLAN)
		type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
	else
		return ICE_ERR_PARAM;

	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;
	for (i = 0; i < num_vsi; i++) {
		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
			status = ICE_ERR_PARAM;
			goto exit;
		}
		/* AQ call requires hw_vsi_id(s), not the driver handles */
		s_rule->pdata.vsi_list.vsi[i] =
			cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
	}

	s_rule->type = cpu_to_le16(type);
	s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
	s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);

exit:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051static enum ice_status
1052ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
1053 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
1054{
1055 enum ice_status status;
1056
1057 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
1058 ice_aqc_opc_alloc_res);
1059 if (status)
1060 return status;
1061
1062
1063 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
1064 *vsi_list_id, false,
1065 ice_aqc_opc_add_sw_rules, lkup_type);
1066}
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
/**
 * ice_create_pkt_fwd_rule - create a new packet forwarding rule
 * @hw: pointer to the HW struct
 * @f_entry: filter entry describing the rule to create
 *
 * Programs the rule in hardware and, on success, adds a management entry
 * to the matching recipe's filter list. The rule ID assigned by FW is
 * written back into both @f_entry and the management entry.
 *
 * NOTE: the caller must hold the recipe's filt_rule_lock; this function
 * appends to recp->filt_rules without taking it (see ice_add_rule_internal).
 */
static enum ice_status
ice_create_pkt_fwd_rule(struct ice_hw *hw,
			struct ice_fltr_list_entry *f_entry)
{
	struct ice_fltr_mgmt_list_entry *fm_entry;
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_sw_lkup_type l_type;
	struct ice_sw_recipe *recp;
	enum ice_status status;

	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;
	fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
				GFP_KERNEL);
	if (!fm_entry) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_create_pkt_fwd_rule_exit;
	}

	fm_entry->fltr_info = f_entry->fltr_info;

	/* New rule starts with a single VSI and no large action/marker */
	fm_entry->vsi_count = 1;
	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;

	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
			 ice_aqc_opc_add_sw_rules);

	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
				 ice_aqc_opc_add_sw_rules, NULL);
	if (status) {
		/* HW add failed: drop the management entry too */
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
		goto ice_create_pkt_fwd_rule_exit;
	}

	/* FW wrote the assigned rule index back into the buffer */
	f_entry->fltr_info.fltr_rule_id =
		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
	fm_entry->fltr_info.fltr_rule_id =
		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);

	/* The book keeping entries will get removed when base driver
	 * calls remove filter AQ command
	 */
	l_type = fm_entry->fltr_info.lkup_type;
	recp = &hw->switch_info->recp_list[l_type];
	list_add(&fm_entry->list_entry, &recp->filt_rules);

ice_create_pkt_fwd_rule_exit:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141static enum ice_status
1142ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
1143{
1144 struct ice_aqc_sw_rules_elem *s_rule;
1145 enum ice_status status;
1146
1147 s_rule = devm_kzalloc(ice_hw_to_dev(hw),
1148 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
1149 if (!s_rule)
1150 return ICE_ERR_NO_MEMORY;
1151
1152 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
1153
1154 s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id);
1155
1156
1157 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
1158 ice_aqc_opc_update_sw_rules, NULL);
1159
1160 devm_kfree(ice_hw_to_dev(hw), s_rule);
1161 return status;
1162}
1163
1164
1165
1166
1167
1168
1169
/**
 * ice_update_sw_rule_bridge_mode - reprogram MAC rules after a bridge
 * mode change
 * @hw: pointer to the HW struct
 *
 * Re-issues every Tx unicast MAC forwarding rule so its LAN/loopback
 * enable bits are recomputed (see ice_fill_sw_info()) for the new mode.
 * Stops at the first failure and returns that status.
 */
enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *fm_entry;
	enum ice_status status = 0;
	struct list_head *rule_head;
	struct mutex *rule_lock;

	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	mutex_lock(rule_lock);
	list_for_each_entry(fm_entry, rule_head, list_entry) {
		struct ice_fltr_info *fi = &fm_entry->fltr_info;
		u8 *addr = fi->l_data.mac.mac_addr;

		/* Update unicast Tx forwarding rules only — these are the
		 * ones whose lan_en/lb_en setting depends on bridge mode.
		 */
		if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
		    (fi->fltr_act == ICE_FWD_TO_VSI ||
		     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
		     fi->fltr_act == ICE_FWD_TO_Q ||
		     fi->fltr_act == ICE_FWD_TO_QGRP)) {
			status = ice_update_pkt_fwd_rule(hw, fi);
			if (status)
				break;
		}
	}

	mutex_unlock(rule_lock);

	return status;
}
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226static enum ice_status
1227ice_add_update_vsi_list(struct ice_hw *hw,
1228 struct ice_fltr_mgmt_list_entry *m_entry,
1229 struct ice_fltr_info *cur_fltr,
1230 struct ice_fltr_info *new_fltr)
1231{
1232 enum ice_status status = 0;
1233 u16 vsi_list_id = 0;
1234
1235 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
1236 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
1237 return ICE_ERR_NOT_IMPL;
1238
1239 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
1240 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
1241 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
1242 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
1243 return ICE_ERR_NOT_IMPL;
1244
1245 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
1246
1247
1248
1249
1250 struct ice_fltr_info tmp_fltr;
1251 u16 vsi_handle_arr[2];
1252
1253
1254 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
1255 return ICE_ERR_ALREADY_EXISTS;
1256
1257 vsi_handle_arr[0] = cur_fltr->vsi_handle;
1258 vsi_handle_arr[1] = new_fltr->vsi_handle;
1259 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
1260 &vsi_list_id,
1261 new_fltr->lkup_type);
1262 if (status)
1263 return status;
1264
1265 tmp_fltr = *new_fltr;
1266 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
1267 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
1268 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
1269
1270
1271
1272 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
1273 if (status)
1274 return status;
1275
1276 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
1277 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
1278 m_entry->vsi_list_info =
1279 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
1280 vsi_list_id);
1281
1282
1283
1284
1285 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
1286 status =
1287 ice_add_marker_act(hw, m_entry,
1288 m_entry->sw_marker_id,
1289 m_entry->lg_act_idx);
1290 } else {
1291 u16 vsi_handle = new_fltr->vsi_handle;
1292 enum ice_adminq_opc opcode;
1293
1294 if (!m_entry->vsi_list_info)
1295 return ICE_ERR_CFG;
1296
1297
1298 if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
1299 return 0;
1300
1301
1302
1303
1304 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
1305 opcode = ice_aqc_opc_update_sw_rules;
1306
1307 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
1308 vsi_list_id, false, opcode,
1309 new_fltr->lkup_type);
1310
1311 if (!status)
1312 set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
1313 }
1314 if (!status)
1315 m_entry->vsi_count++;
1316 return status;
1317}
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328static struct ice_fltr_mgmt_list_entry *
1329ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
1330{
1331 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
1332 struct ice_switch_info *sw = hw->switch_info;
1333 struct list_head *list_head;
1334
1335 list_head = &sw->recp_list[recp_id].filt_rules;
1336 list_for_each_entry(list_itr, list_head, list_entry) {
1337 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
1338 sizeof(f_info->l_data)) &&
1339 f_info->flag == list_itr->fltr_info.flag) {
1340 ret = list_itr;
1341 break;
1342 }
1343 }
1344 return ret;
1345}
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
/**
 * ice_find_vsi_list_entry - find a VSI list containing a given VSI
 * @hw: pointer to the HW struct
 * @recp_id: recipe (lookup type) whose filter list is searched
 * @vsi_handle: VSI handle to look for
 * @vsi_list_id: out: the matching list's hardware ID
 *
 * Returns the VSI list map of the first matching rule, or NULL.
 * NOTE(review): only rules with vsi_count == 1 are considered — this looks
 * intentional (avoid reusing lists already shared by several VSIs), but
 * confirm before relying on it.
 */
static struct ice_vsi_list_map_info *
ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
			u16 *vsi_list_id)
{
	struct ice_vsi_list_map_info *map_info = NULL;
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_itr;
	struct list_head *list_head;

	list_head = &sw->recp_list[recp_id].filt_rules;
	list_for_each_entry(list_itr, list_head, list_entry) {
		if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
			map_info = list_itr->vsi_list_info;
			if (test_bit(vsi_handle, map_info->vsi_map)) {
				*vsi_list_id = map_info->vsi_list_id;
				return map_info;
			}
		}
	}
	return NULL;
}
1379
1380
1381
1382
1383
1384
1385
1386
1387
/**
 * ice_add_rule_internal - add a rule for a given lookup type
 * @hw: pointer to the HW struct
 * @recp_id: recipe (lookup type) the rule belongs to
 * @f_entry: filter entry describing the rule
 *
 * If no rule with matching lookup data exists, a new forwarding rule is
 * created; otherwise the VSI is folded into the existing rule's VSI set
 * (possibly converting it to a VSI list rule).
 */
static enum ice_status
ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
		      struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct mutex *rule_lock;
	enum ice_status status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;

	mutex_lock(rule_lock);
	new_fltr = &f_entry->fltr_info;
	/* source is the port for Rx rules, the VSI for Tx rules */
	if (new_fltr->flag & ICE_FLTR_RX)
		new_fltr->src = hw->port_info->lport;
	else if (new_fltr->flag & ICE_FLTR_TX)
		new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;

	m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
	if (!m_entry) {
		/* lock still held: ice_create_pkt_fwd_rule appends to the
		 * recipe's filt_rules list
		 */
		mutex_unlock(rule_lock);
		return ice_create_pkt_fwd_rule(hw, f_entry);
	}

	cur_fltr = &m_entry->fltr_info;
	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
	mutex_unlock(rule_lock);

	return status;
}
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434static enum ice_status
1435ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
1436 enum ice_sw_lkup_type lkup_type)
1437{
1438 struct ice_aqc_sw_rules_elem *s_rule;
1439 enum ice_status status;
1440 u16 s_rule_size;
1441
1442 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
1443 s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
1444 if (!s_rule)
1445 return ICE_ERR_NO_MEMORY;
1446
1447 s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
1448 s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
1449
1450
1451
1452
1453 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
1454 ice_aqc_opc_free_res);
1455
1456 devm_kfree(ice_hw_to_dev(hw), s_rule);
1457 return status;
1458}
1459
1460
1461
1462
1463
1464
1465
1466
/**
 * ice_rem_update_vsi_list - remove one VSI from a rule's VSI list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry whose rule forwards to a VSI list
 *
 * Removes @vsi_handle from the rule's VSI list. When the list shrinks to a
 * single VSI (for non-VLAN lookups) the rule is converted back to a plain
 * ICE_FWD_TO_VSI rule and the now-unneeded VSI list is freed; for VLAN
 * lookups the list is kept until it becomes empty.
 */
static enum ice_status
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			struct ice_fltr_mgmt_list_entry *fm_list)
{
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status = 0;
	u16 vsi_list_id;

	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;

	/* A rule with the VSI being removed does not exist */
	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
		return ICE_ERR_DOES_NOT_EXIST;

	lkup_type = fm_list->fltr_info.lkup_type;
	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);

	/* One VSI left in a non-VLAN list: convert the rule back to a direct
	 * FWD_TO_VSI rule targeting the remaining VSI.
	 */
	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;
		u16 rem_vsi_handle;

		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
						ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return ICE_ERR_OUT_OF_RANGE;

		/* Make sure the VSI list is empty before it is removed below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		tmp_fltr_info.vsi_handle = rem_vsi_handle;
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
		if (status) {
			ice_debug(hw, ICE_DBG_SW,
				  "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
			return status;
		}

		fm_list->fltr_info = tmp_fltr_info;
	}

	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW,
				  "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}
1549
1550
1551
1552
1553
1554
1555
1556static enum ice_status
1557ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
1558 struct ice_fltr_list_entry *f_entry)
1559{
1560 struct ice_switch_info *sw = hw->switch_info;
1561 struct ice_fltr_mgmt_list_entry *list_elem;
1562 struct mutex *rule_lock;
1563 enum ice_status status = 0;
1564 bool remove_rule = false;
1565 u16 vsi_handle;
1566
1567 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
1568 return ICE_ERR_PARAM;
1569 f_entry->fltr_info.fwd_id.hw_vsi_id =
1570 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
1571
1572 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
1573 mutex_lock(rule_lock);
1574 list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
1575 if (!list_elem) {
1576 status = ICE_ERR_DOES_NOT_EXIST;
1577 goto exit;
1578 }
1579
1580 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
1581 remove_rule = true;
1582 } else if (!list_elem->vsi_list_info) {
1583 status = ICE_ERR_DOES_NOT_EXIST;
1584 goto exit;
1585 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
1586
1587
1588
1589
1590
1591 list_elem->vsi_list_info->ref_cnt--;
1592 remove_rule = true;
1593 } else {
1594
1595
1596
1597
1598
1599 vsi_handle = f_entry->fltr_info.vsi_handle;
1600 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
1601 if (status)
1602 goto exit;
1603
1604 if (list_elem->vsi_count == 0)
1605 remove_rule = true;
1606 }
1607
1608 if (remove_rule) {
1609
1610 struct ice_aqc_sw_rules_elem *s_rule;
1611
1612 s_rule = devm_kzalloc(ice_hw_to_dev(hw),
1613 ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
1614 GFP_KERNEL);
1615 if (!s_rule) {
1616 status = ICE_ERR_NO_MEMORY;
1617 goto exit;
1618 }
1619
1620 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
1621 ice_aqc_opc_remove_sw_rules);
1622
1623 status = ice_aq_sw_rules(hw, s_rule,
1624 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
1625 ice_aqc_opc_remove_sw_rules, NULL);
1626 if (status)
1627 goto exit;
1628
1629
1630 devm_kfree(ice_hw_to_dev(hw), s_rule);
1631
1632 list_del(&list_elem->list_entry);
1633 devm_kfree(ice_hw_to_dev(hw), list_elem);
1634 }
1635exit:
1636 mutex_unlock(rule_lock);
1637 return status;
1638}
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
/**
 * ice_add_mac - Add MAC based filter rules
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * Multicast addresses (and unicast addresses when ucast_shared is set) are
 * added one at a time via ice_add_rule_internal(). Exclusive unicast
 * addresses are batched: one switch-rule buffer is built for all of them
 * and sent to firmware in ICE_AQ_MAX_BUF_LEN-sized chunks.
 *
 * NOTE(review): duplicate unicast addresses within @m_list are not
 * detected by this function - presumably the caller must de-duplicate.
 */
enum ice_status
ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
	struct ice_fltr_list_entry *m_list_itr;
	struct list_head *rule_head;
	u16 elem_sent, total_elem_left;
	struct ice_switch_info *sw;
	struct mutex *rule_lock; /* protects the MAC filter rule list */
	enum ice_status status = 0;
	u16 num_unicast = 0;
	u16 s_rule_size;

	if (!m_list || !hw)
		return ICE_ERR_PARAM;

	s_rule = NULL;
	sw = hw->switch_info;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	/* First pass: validate every entry, count exclusive unicast
	 * addresses, and add multicast/shared-unicast entries directly.
	 */
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
		u16 vsi_handle;
		u16 hw_vsi_id;

		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
		vsi_handle = m_list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return ICE_ERR_PARAM;
		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
		/* source must be the VSI itself for TX MAC filters */
		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
			return ICE_ERR_PARAM;
		m_list_itr->fltr_info.src = hw_vsi_id;
		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
		    is_zero_ether_addr(add))
			return ICE_ERR_PARAM;
		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
			/* An exclusive unicast address must not already have
			 * a rule; it will be added in the bulk update below.
			 */
			mutex_lock(rule_lock);
			if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
						&m_list_itr->fltr_info)) {
				mutex_unlock(rule_lock);
				return ICE_ERR_ALREADY_EXISTS;
			}
			mutex_unlock(rule_lock);
			num_unicast++;
		} else if (is_multicast_ether_addr(add) ||
			   (is_unicast_ether_addr(add) && hw->ucast_shared)) {
			m_list_itr->status =
				ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
						      m_list_itr);
			if (m_list_itr->status)
				return m_list_itr->status;
		}
	}

	mutex_lock(rule_lock);
	/* Exit if no suitable entries were found for the bulk switch rule */
	if (!num_unicast) {
		status = 0;
		goto ice_add_mac_exit;
	}

	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	/* Allocate one contiguous switch-rule buffer for all unicast rules */
	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
			      GFP_KERNEL);
	if (!s_rule) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_add_mac_exit;
	}

	/* Second pass: fill a switch rule for each exclusive unicast entry */
	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];

		if (is_unicast_ether_addr(mac_addr)) {
			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
					 ice_aqc_opc_add_sw_rules);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

	/* Send the rules to firmware in ICE_AQ_MAX_BUF_LEN-sized chunks */
	r_iter = s_rule;

	for (total_elem_left = num_unicast; total_elem_left > 0;
	     total_elem_left -= elem_sent) {
		struct ice_aqc_sw_rules_elem *entry = r_iter;

		elem_sent = min(total_elem_left,
				(u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
					 elem_sent, ice_aqc_opc_add_sw_rules,
					 NULL);
		if (status)
			goto ice_add_mac_exit;
		r_iter = (struct ice_aqc_sw_rules_elem *)
			((u8 *)r_iter + (elem_sent * s_rule_size));
	}

	/* Third pass: record the rule IDs firmware returned and create a
	 * book-keeping entry for each unicast filter.
	 */
	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
		struct ice_fltr_mgmt_list_entry *fm_entry;

		if (is_unicast_ether_addr(mac_addr)) {
			f_info->fltr_rule_id =
				le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
			f_info->fltr_act = ICE_FWD_TO_VSI;
			/* Create an entry to track this MAC address */
			fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
						sizeof(*fm_entry), GFP_KERNEL);
			if (!fm_entry) {
				status = ICE_ERR_NO_MEMORY;
				goto ice_add_mac_exit;
			}
			fm_entry->fltr_info = *f_info;
			fm_entry->vsi_count = 1;

			/* The book-keeping entries are removed when the
			 * corresponding remove-filter request is processed.
			 */
			list_add(&fm_entry->list_entry, rule_head);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

ice_add_mac_exit:
	mutex_unlock(rule_lock);
	if (s_rule)
		devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}
1793
1794
1795
1796
1797
1798
/**
 * ice_add_vlan_internal - Add a VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @f_entry: filter entry containing one VLAN information
 *
 * VLAN rules always forward to a VSI list. A brand new rule either reuses
 * an existing singleton VSI list that already contains the target VSI, or
 * allocates a new list. When the rule exists and its list is shared
 * (ref_cnt > 1), a new two-entry list is created and the rule is repointed
 * to it.
 */
static enum ice_status
ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *v_list_itr;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id = 0, vsi_handle;
	struct mutex *rule_lock; /* protects the VLAN filter rule list */
	enum ice_status status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;

	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
	new_fltr = &f_entry->fltr_info;

	/* VLAN ID must fit in 12 bits */
	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
		return ICE_ERR_PARAM;

	if (new_fltr->src_id != ICE_SRC_ID_VSI)
		return ICE_ERR_PARAM;

	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
	lkup_type = new_fltr->lkup_type;
	vsi_handle = new_fltr->vsi_handle;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	mutex_lock(rule_lock);
	v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
	if (!v_list_itr) {
		struct ice_vsi_list_map_info *map_info = NULL;

		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
			/* Try to reuse an existing singleton VSI list that
			 * already contains this VSI; otherwise create a new
			 * one-entry list.
			 */
			map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
							   vsi_handle,
							   &vsi_list_id);
			if (!map_info) {
				status = ice_create_vsi_list_rule(hw,
								  &vsi_handle,
								  1,
								  &vsi_list_id,
								  lkup_type);
				if (status)
					goto exit;
			}
			/* Convert the action to forward to a VSI list */
			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
		}

		status = ice_create_pkt_fwd_rule(hw, f_entry);
		if (!status) {
			v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
							 new_fltr);
			if (!v_list_itr) {
				status = ICE_ERR_DOES_NOT_EXIST;
				goto exit;
			}
			/* reuse a found list (bump its ref count) or record
			 * the map for the list we just created
			 */
			if (map_info) {
				v_list_itr->vsi_list_info = map_info;
				map_info->ref_cnt++;
			} else {
				v_list_itr->vsi_list_info =
					ice_create_vsi_list_map(hw, &vsi_handle,
								1, vsi_list_id);
			}
		}
	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
		/* The rule exists and owns its VSI list exclusively - just
		 * add the new VSI to that list.
		 */
		cur_fltr = &v_list_itr->fltr_info;
		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
						 new_fltr);
	} else {
		/* The rule exists but its VSI list is shared with other VLAN
		 * rules. Create a new list containing the previous VSI plus
		 * the new one and repoint this rule at it.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];
		u16 cur_handle;

		/* Only singleton lists are ever shared, so a shared list with
		 * more than one VSI indicates an inconsistent state.
		 */
		if (v_list_itr->vsi_count > 1 &&
		    v_list_itr->vsi_list_info->ref_cnt > 1) {
			ice_debug(hw, ICE_DBG_SW,
				  "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
			status = ICE_ERR_CFG;
			goto exit;
		}

		cur_handle =
			find_first_bit(v_list_itr->vsi_list_info->vsi_map,
				       ICE_MAX_VSI);

		/* A rule already exists with the new VSI being added */
		if (cur_handle == vsi_handle) {
			status = ICE_ERR_ALREADY_EXISTS;
			goto exit;
		}

		vsi_handle_arr[0] = cur_handle;
		vsi_handle_arr[1] = vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id, lkup_type);
		if (status)
			goto exit;

		tmp_fltr = v_list_itr->fltr_info;
		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		/* Update the existing switch rule to point at the new list */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			goto exit;

		/* drop our reference on the old (shared) VSI list before
		 * overwriting the map pointer
		 */
		v_list_itr->vsi_list_info->ref_cnt--;

		/* now track the newly created list */
		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
		v_list_itr->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);
		v_list_itr->vsi_count++;
	}

exit:
	mutex_unlock(rule_lock);
	return status;
}
1947
1948
1949
1950
1951
1952
1953enum ice_status
1954ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
1955{
1956 struct ice_fltr_list_entry *v_list_itr;
1957
1958 if (!v_list || !hw)
1959 return ICE_ERR_PARAM;
1960
1961 list_for_each_entry(v_list_itr, v_list, list_entry) {
1962 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
1963 return ICE_ERR_PARAM;
1964 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
1965 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
1966 if (v_list_itr->status)
1967 return v_list_itr->status;
1968 }
1969 return 0;
1970}
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981enum ice_status
1982ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
1983{
1984 struct ice_fltr_list_entry *em_list_itr;
1985
1986 if (!em_list || !hw)
1987 return ICE_ERR_PARAM;
1988
1989 list_for_each_entry(em_list_itr, em_list, list_entry) {
1990 enum ice_sw_lkup_type l_type =
1991 em_list_itr->fltr_info.lkup_type;
1992
1993 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
1994 l_type != ICE_SW_LKUP_ETHERTYPE)
1995 return ICE_ERR_PARAM;
1996
1997 em_list_itr->status = ice_add_rule_internal(hw, l_type,
1998 em_list_itr);
1999 if (em_list_itr->status)
2000 return em_list_itr->status;
2001 }
2002 return 0;
2003}
2004
2005
2006
2007
2008
2009
2010enum ice_status
2011ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
2012{
2013 struct ice_fltr_list_entry *em_list_itr, *tmp;
2014
2015 if (!em_list || !hw)
2016 return ICE_ERR_PARAM;
2017
2018 list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
2019 enum ice_sw_lkup_type l_type =
2020 em_list_itr->fltr_info.lkup_type;
2021
2022 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
2023 l_type != ICE_SW_LKUP_ETHERTYPE)
2024 return ICE_ERR_PARAM;
2025
2026 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
2027 em_list_itr);
2028 if (em_list_itr->status)
2029 return em_list_itr->status;
2030 }
2031 return 0;
2032}
2033
2034
2035
2036
2037
2038
2039static void
2040ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
2041{
2042 if (!list_empty(rule_head)) {
2043 struct ice_fltr_mgmt_list_entry *entry;
2044 struct ice_fltr_mgmt_list_entry *tmp;
2045
2046 list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
2047 list_del(&entry->list_entry);
2048 devm_kfree(ice_hw_to_dev(hw), entry);
2049 }
2050 }
2051}
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
/**
 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to set as default
 * @set: true to add the default rule, false to remove it
 * @direction: ICE_FLTR_RX and/or ICE_FLTR_TX
 *
 * Adds (or removes) the switch rule that makes @vsi_handle the default VSI
 * for the given direction, and updates the port_info book-keeping
 * (dflt_*_vsi_num / dflt_*_vsi_rule_id) accordingly.
 */
enum ice_status
ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	struct ice_fltr_info f_info;
	enum ice_adminq_opc opcode;
	enum ice_status status;
	u16 s_rule_size;
	u16 hw_vsi_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	/* an add carries a dummy Ethernet header, a remove does not */
	s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
			    ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;

	memset(&f_info, 0, sizeof(f_info));

	f_info.lkup_type = ICE_SW_LKUP_DFLT;
	f_info.flag = direction;
	f_info.fltr_act = ICE_FWD_TO_VSI;
	f_info.fwd_id.hw_vsi_id = hw_vsi_id;

	if (f_info.flag & ICE_FLTR_RX) {
		f_info.src = hw->port_info->lport;
		f_info.src_id = ICE_SRC_ID_LPORT;
		if (!set)
			/* removal needs the rule ID recorded at add time */
			f_info.fltr_rule_id =
				hw->port_info->dflt_rx_vsi_rule_id;
	} else if (f_info.flag & ICE_FLTR_TX) {
		f_info.src_id = ICE_SRC_ID_VSI;
		f_info.src = hw_vsi_id;
		if (!set)
			f_info.fltr_rule_id =
				hw->port_info->dflt_tx_vsi_rule_id;
	}

	if (set)
		opcode = ice_aqc_opc_add_sw_rules;
	else
		opcode = ice_aqc_opc_remove_sw_rules;

	ice_fill_sw_rule(hw, &f_info, s_rule, opcode);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
	if (status || !(f_info.flag & ICE_FLTR_TX_RX))
		goto out;
	if (set) {
		/* remember the rule ID firmware assigned for later removal */
		u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);

		if (f_info.flag & ICE_FLTR_TX) {
			hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
			hw->port_info->dflt_tx_vsi_rule_id = index;
		} else if (f_info.flag & ICE_FLTR_RX) {
			hw->port_info->dflt_rx_vsi_num = hw_vsi_id;
			hw->port_info->dflt_rx_vsi_rule_id = index;
		}
	} else {
		if (f_info.flag & ICE_FLTR_TX) {
			hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
			hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
		} else if (f_info.flag & ICE_FLTR_RX) {
			hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
			hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
		}
	}

out:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152enum ice_status
2153ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
2154{
2155 struct ice_fltr_list_entry *list_itr, *tmp;
2156
2157 if (!m_list)
2158 return ICE_ERR_PARAM;
2159
2160 list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
2161 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
2162
2163 if (l_type != ICE_SW_LKUP_MAC)
2164 return ICE_ERR_PARAM;
2165 list_itr->status = ice_remove_rule_internal(hw,
2166 ICE_SW_LKUP_MAC,
2167 list_itr);
2168 if (list_itr->status)
2169 return list_itr->status;
2170 }
2171 return 0;
2172}
2173
2174
2175
2176
2177
2178
2179enum ice_status
2180ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
2181{
2182 struct ice_fltr_list_entry *v_list_itr, *tmp;
2183
2184 if (!v_list || !hw)
2185 return ICE_ERR_PARAM;
2186
2187 list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
2188 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
2189
2190 if (l_type != ICE_SW_LKUP_VLAN)
2191 return ICE_ERR_PARAM;
2192 v_list_itr->status = ice_remove_rule_internal(hw,
2193 ICE_SW_LKUP_VLAN,
2194 v_list_itr);
2195 if (v_list_itr->status)
2196 return v_list_itr->status;
2197 }
2198 return 0;
2199}
2200
2201
2202
2203
2204
2205
2206static bool
2207ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
2208{
2209 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
2210 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
2211 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
2212 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
2213}
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228static enum ice_status
2229ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
2230 struct list_head *vsi_list_head,
2231 struct ice_fltr_info *fi)
2232{
2233 struct ice_fltr_list_entry *tmp;
2234
2235
2236
2237
2238 tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
2239 if (!tmp)
2240 return ICE_ERR_NO_MEMORY;
2241
2242 tmp->fltr_info = *fi;
2243
2244
2245
2246
2247
2248
2249 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
2250 tmp->fltr_info.vsi_handle = vsi_handle;
2251 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
2252
2253 list_add(&tmp->list_entry, vsi_list_head);
2254
2255 return 0;
2256}
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271static enum ice_status
2272ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
2273 struct list_head *lkup_list_head,
2274 struct list_head *vsi_list_head)
2275{
2276 struct ice_fltr_mgmt_list_entry *fm_entry;
2277 enum ice_status status = 0;
2278
2279
2280 if (!ice_is_vsi_valid(hw, vsi_handle))
2281 return ICE_ERR_PARAM;
2282
2283 list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
2284 struct ice_fltr_info *fi;
2285
2286 fi = &fm_entry->fltr_info;
2287 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
2288 continue;
2289
2290 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
2291 vsi_list_head, fi);
2292 if (status)
2293 return status;
2294 }
2295 return status;
2296}
2297
2298
2299
2300
2301
2302
2303
2304
2305static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
2306{
2307 u16 vid = fi->l_data.mac_vlan.vlan_id;
2308 u8 *macaddr = fi->l_data.mac.mac_addr;
2309 bool is_tx_fltr = false;
2310 u8 promisc_mask = 0;
2311
2312 if (fi->flag == ICE_FLTR_TX)
2313 is_tx_fltr = true;
2314
2315 if (is_broadcast_ether_addr(macaddr))
2316 promisc_mask |= is_tx_fltr ?
2317 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
2318 else if (is_multicast_ether_addr(macaddr))
2319 promisc_mask |= is_tx_fltr ?
2320 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
2321 else if (is_unicast_ether_addr(macaddr))
2322 promisc_mask |= is_tx_fltr ?
2323 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
2324 if (vid)
2325 promisc_mask |= is_tx_fltr ?
2326 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
2327
2328 return promisc_mask;
2329}
2330
2331
2332
2333
2334
2335
2336
2337static enum ice_status
2338ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
2339 struct list_head *v_list)
2340{
2341 struct ice_fltr_list_entry *v_list_itr, *tmp;
2342
2343 list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
2344 v_list_itr->status =
2345 ice_remove_rule_internal(hw, recp_id, v_list_itr);
2346 if (v_list_itr->status)
2347 return v_list_itr->status;
2348 }
2349 return 0;
2350}
2351
2352
2353
2354
2355
2356
2357
2358
/**
 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to clear mode
 * @promisc_mask: bitmask of ICE_PROMISC_* modes to clear
 * @vid: VLAN ID to clear VLAN promiscuous for (0 selects non-VLAN recipe)
 *
 * Collects every promisc rule used by the VSI whose derived mask is fully
 * covered by @promisc_mask, then removes them in one pass.
 */
enum ice_status
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
		      u16 vid)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry, *tmp;
	struct list_head remove_list_head;
	struct ice_fltr_mgmt_list_entry *itr;
	struct list_head *rule_head;
	struct mutex *rule_lock; /* protects this recipe's filter rule list */
	enum ice_status status = 0;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	if (vid)
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	else
		recipe_id = ICE_SW_LKUP_PROMISC;

	rule_head = &sw->recp_list[recipe_id].filt_rules;
	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;

	INIT_LIST_HEAD(&remove_list_head);

	mutex_lock(rule_lock);
	list_for_each_entry(itr, rule_head, list_entry) {
		u8 fltr_promisc_mask = 0;

		if (!ice_vsi_uses_fltr(itr, vsi_handle))
			continue;

		fltr_promisc_mask |=
			ice_determine_promisc_mask(&itr->fltr_info);

		/* Skip if filter is not completely specified by given mask */
		if (fltr_promisc_mask & ~promisc_mask)
			continue;

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
							&remove_list_head,
							&itr->fltr_info);
		if (status) {
			mutex_unlock(rule_lock);
			goto free_fltr_list;
		}
	}
	mutex_unlock(rule_lock);

	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);

free_fltr_list:
	/* free the temporary copies regardless of removal outcome */
	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
	}

	return status;
}
2419
2420
2421
2422
2423
2424
2425
2426
/**
 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: bitmask of ICE_PROMISC_* modes to set
 * @vid: VLAN ID to set VLAN promiscuous for (used when VLAN bits are set)
 *
 * A separate filter rule is needed for each packet-type/direction
 * combination, so the mask is peeled one combination at a time: each loop
 * iteration picks one set bit group, clears it from the working mask, and
 * installs the corresponding rule until the mask is exhausted.
 */
enum ice_status
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
{
	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
	struct ice_fltr_list_entry f_list_entry;
	struct ice_fltr_info new_fltr;
	enum ice_status status = 0;
	bool is_tx_fltr;
	u16 hw_vsi_id;
	int pkt_type;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	memset(&new_fltr, 0, sizeof(new_fltr));

	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
		new_fltr.l_data.mac_vlan.vlan_id = vid;
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	} else {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
		recipe_id = ICE_SW_LKUP_PROMISC;
	}

	/* Peel one packet-type/direction combination per iteration and add
	 * a rule for it; the corresponding bit is cleared from the working
	 * mask so the loop terminates once every request is handled.
	 */
	while (promisc_mask) {
		u8 *mac_addr;

		pkt_type = 0;
		is_tx_fltr = false;

		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
			pkt_type = UCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
			pkt_type = UCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
			pkt_type = MCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
			pkt_type = MCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
			pkt_type = BCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
			pkt_type = BCAST_FLTR;
			is_tx_fltr = true;
		}

		/* Check for VLAN promiscuous flag */
		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
			is_tx_fltr = true;
		}

		/* Set filter DA based on packet type */
		mac_addr = new_fltr.l_data.mac.mac_addr;
		if (pkt_type == BCAST_FLTR) {
			eth_broadcast_addr(mac_addr);
		} else if (pkt_type == MCAST_FLTR ||
			   pkt_type == UCAST_FLTR) {
			/* Use the dummy ether header DA */
			ether_addr_copy(mac_addr, dummy_eth_header);
			if (pkt_type == MCAST_FLTR)
				mac_addr[0] |= 0x1;	/* Set multicast bit */
		}

		/* Need to reset this to zero for all iterations */
		new_fltr.flag = 0;
		if (is_tx_fltr) {
			new_fltr.flag |= ICE_FLTR_TX;
			new_fltr.src = hw_vsi_id;
		} else {
			new_fltr.flag |= ICE_FLTR_RX;
			new_fltr.src = hw->port_info->lport;
		}

		new_fltr.fltr_act = ICE_FWD_TO_VSI;
		new_fltr.vsi_handle = vsi_handle;
		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
		f_list_entry.fltr_info = new_fltr;

		status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
		if (status)
			goto set_promisc_exit;
	}

set_promisc_exit:
	return status;
}
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540
/**
 * ice_set_vlan_vsi_promisc - set/clear VLAN promiscuous mode for all VLANs
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: bitmask of ICE_PROMISC_* modes to set or clear
 * @rm_vlan_promisc: true to clear VLAN promiscuous mode, false to set it
 *
 * Builds a temporary list of every VLAN filter used by the VSI, then sets
 * or clears the requested promiscuous mode for each VLAN ID found. Stops
 * at the first failure and returns that status.
 */
enum ice_status
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
			 bool rm_vlan_promisc)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct list_head vsi_list_head;
	struct list_head *vlan_head;
	struct mutex *vlan_lock; /* protects the VLAN filter rule list */
	enum ice_status status;
	u16 vlan_id;

	INIT_LIST_HEAD(&vsi_list_head);
	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	mutex_lock(vlan_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
					  &vsi_list_head);
	mutex_unlock(vlan_lock);
	if (status)
		goto free_fltr_list;

	list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
		if (rm_vlan_promisc)
			status = ice_clear_vsi_promisc(hw, vsi_handle,
						       promisc_mask, vlan_id);
		else
			status = ice_set_vsi_promisc(hw, vsi_handle,
						     promisc_mask, vlan_id);
		if (status)
			break;
	}

free_fltr_list:
	/* free the temporary VLAN filter copies */
	list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
		list_del(&list_itr->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_itr);
	}
	return status;
}
2582
2583
2584
2585
2586
2587
2588
/**
 * ice_remove_vsi_lkup_fltr - Remove VSI filters of one lookup type
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @lkup: lookup type whose filters should be removed
 *
 * Collects every filter of type @lkup used by the VSI into a temporary
 * list, dispatches to the type-specific removal routine, then frees the
 * temporary entries. Failures are logged (for unsupported types) or
 * silently dropped; the function returns nothing.
 */
static void
ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
			 enum ice_sw_lkup_type lkup)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry;
	struct list_head remove_list_head;
	struct list_head *rule_head;
	struct ice_fltr_list_entry *tmp;
	struct mutex *rule_lock;	/* protects this recipe's rule list */
	enum ice_status status;

	INIT_LIST_HEAD(&remove_list_head);
	rule_lock = &sw->recp_list[lkup].filt_rule_lock;
	rule_head = &sw->recp_list[lkup].filt_rules;
	mutex_lock(rule_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
					  &remove_list_head);
	mutex_unlock(rule_lock);
	if (status)
		return;

	switch (lkup) {
	case ICE_SW_LKUP_MAC:
		ice_remove_mac(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_VLAN:
		ice_remove_vlan(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_PROMISC:
	case ICE_SW_LKUP_PROMISC_VLAN:
		ice_remove_promisc(hw, lkup, &remove_list_head);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
	case ICE_SW_LKUP_ETHERTYPE:
	case ICE_SW_LKUP_ETHERTYPE_MAC:
	case ICE_SW_LKUP_DFLT:
	case ICE_SW_LKUP_LAST:
	default:
		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
		break;
	}

	/* free the temporary copies regardless of removal outcome */
	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
	}
}
2637
2638
2639
2640
2641
2642
2643void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
2644{
2645 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
2646 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
2647 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
2648 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
2649 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
2650 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
2651 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
2652 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
2653}
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
/**
 * ice_replay_vsi_fltr - replay a VSI's filter rules of one recipe
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle whose filters should be replayed
 * @recp_id: recipe ID whose replay-rule list is processed
 * @list_head: replay rule list for this recipe
 *
 * Re-adds to hardware every saved filter on @list_head that involves
 * @vsi_handle. Rules with fewer than two VSIs are added directly; rules
 * backed by a VSI list first have the VSI's bit cleared so the add path
 * can re-insert it.
 */
static enum ice_status
ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
		    struct list_head *list_head)
{
	struct ice_fltr_mgmt_list_entry *itr;
	enum ice_status status = 0;
	u16 hw_vsi_id;

	if (list_empty(list_head))
		return status;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	list_for_each_entry(itr, list_head, list_entry) {
		struct ice_fltr_list_entry f_entry;

		f_entry.fltr_info = itr->fltr_info;
		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
		    itr->fltr_info.vsi_handle == vsi_handle) {
			/* update the src if it holds the VSI number */
			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
				f_entry.fltr_info.src = hw_vsi_id;
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
			if (status)
				goto end;
			continue;
		}
		if (!itr->vsi_list_info ||
		    !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
			continue;
		/* Clear the bit so the add path below can re-insert the VSI */
		clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
		f_entry.fltr_info.vsi_handle = vsi_handle;
		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
		/* update the src if it holds the VSI number */
		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
			f_entry.fltr_info.src = hw_vsi_id;
		if (recp_id == ICE_SW_LKUP_VLAN)
			status = ice_add_vlan_internal(hw, &f_entry);
		else
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
		if (status)
			goto end;
	}
end:
	return status;
}
2711
2712
2713
2714
2715
2716
2717
2718
2719enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
2720{
2721 struct ice_switch_info *sw = hw->switch_info;
2722 enum ice_status status = 0;
2723 u8 i;
2724
2725 for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
2726 struct list_head *head;
2727
2728 head = &sw->recp_list[i].filt_replay_rules;
2729 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
2730 if (status)
2731 return status;
2732 }
2733 return status;
2734}
2735
2736
2737
2738
2739
2740
2741
2742void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
2743{
2744 struct ice_switch_info *sw = hw->switch_info;
2745 u8 i;
2746
2747 if (!sw)
2748 return;
2749
2750 for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
2751 if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
2752 struct list_head *l_head;
2753
2754 l_head = &sw->recp_list[i].filt_replay_rules;
2755 ice_rem_sw_rule_info(hw, l_head);
2756 }
2757 }
2758}
2759