1
2
3
4#include "ice_lib.h"
5#include "ice_switch.h"
6
7#define ICE_ETH_DA_OFFSET 0
8#define ICE_ETH_ETHTYPE_OFFSET 12
9#define ICE_ETH_VLAN_TCI_OFFSET 14
10#define ICE_MAX_VLAN_ID 0xFFF
11#define ICE_IPV6_ETHER_ID 0x86DD
27
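/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
 * struct to configure any switch filter rules.
 * {DA (6 bytes), SA (6 bytes), Ether type (2 bytes) OR VLAN tag (4 bytes)}
 *
 * Byte 0 and byte 6 are 0x2 to mark DA/SA as locally administered MACs.
 * Bytes 12-13 are 0x8100: for a VLAN filter they form the 802.1Q ether type
 * (the following two bytes hold the VLAN ID to program); for an ether type
 * filter the header is treated as untagged and bytes 12-13 carry the ether
 * type itself.
 */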
28#define DUMMY_ETH_HDR_LEN 16
29static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
30 0x2, 0, 0, 0, 0, 0,
31 0x81, 0, 0, 0};
32
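/* Each dummy packet below has a companion offsets table that records the byte
 * offset of every protocol header within that packet; the table is terminated
 * with ICE_PROTOCOL_LAST.
 */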
33struct ice_dummy_pkt_offsets {
34 enum ice_protocol_type type;
35 u16 offset;
36};
37
38static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
39 { ICE_MAC_OFOS, 0 },
40 { ICE_ETYPE_OL, 12 },
41 { ICE_IPV4_OFOS, 14 },
42 { ICE_NVGRE, 34 },
43 { ICE_MAC_IL, 42 },
44 { ICE_IPV4_IL, 56 },
45 { ICE_TCP_IL, 76 },
46 { ICE_PROTOCOL_LAST, 0 },
47};
48
49static const u8 dummy_gre_tcp_packet[] = {
50 0x00, 0x00, 0x00, 0x00,
51 0x00, 0x00, 0x00, 0x00,
52 0x00, 0x00, 0x00, 0x00,
53
54 0x08, 0x00,
55
56 0x45, 0x00, 0x00, 0x3E,
57 0x00, 0x00, 0x00, 0x00,
58 0x00, 0x2F, 0x00, 0x00,
59 0x00, 0x00, 0x00, 0x00,
60 0x00, 0x00, 0x00, 0x00,
61
62 0x80, 0x00, 0x65, 0x58,
63 0x00, 0x00, 0x00, 0x00,
64
65 0x00, 0x00, 0x00, 0x00,
66 0x00, 0x00, 0x00, 0x00,
67 0x00, 0x00, 0x00, 0x00,
68 0x08, 0x00,
69
70 0x45, 0x00, 0x00, 0x14,
71 0x00, 0x00, 0x00, 0x00,
72 0x00, 0x06, 0x00, 0x00,
73 0x00, 0x00, 0x00, 0x00,
74 0x00, 0x00, 0x00, 0x00,
75
76 0x00, 0x00, 0x00, 0x00,
77 0x00, 0x00, 0x00, 0x00,
78 0x00, 0x00, 0x00, 0x00,
79 0x50, 0x02, 0x20, 0x00,
80 0x00, 0x00, 0x00, 0x00
81};
82
83static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
84 { ICE_MAC_OFOS, 0 },
85 { ICE_ETYPE_OL, 12 },
86 { ICE_IPV4_OFOS, 14 },
87 { ICE_NVGRE, 34 },
88 { ICE_MAC_IL, 42 },
89 { ICE_IPV4_IL, 56 },
90 { ICE_UDP_ILOS, 76 },
91 { ICE_PROTOCOL_LAST, 0 },
92};
93
94static const u8 dummy_gre_udp_packet[] = {
95 0x00, 0x00, 0x00, 0x00,
96 0x00, 0x00, 0x00, 0x00,
97 0x00, 0x00, 0x00, 0x00,
98
99 0x08, 0x00,
100
101 0x45, 0x00, 0x00, 0x3E,
102 0x00, 0x00, 0x00, 0x00,
103 0x00, 0x2F, 0x00, 0x00,
104 0x00, 0x00, 0x00, 0x00,
105 0x00, 0x00, 0x00, 0x00,
106
107 0x80, 0x00, 0x65, 0x58,
108 0x00, 0x00, 0x00, 0x00,
109
110 0x00, 0x00, 0x00, 0x00,
111 0x00, 0x00, 0x00, 0x00,
112 0x00, 0x00, 0x00, 0x00,
113 0x08, 0x00,
114
115 0x45, 0x00, 0x00, 0x14,
116 0x00, 0x00, 0x00, 0x00,
117 0x00, 0x11, 0x00, 0x00,
118 0x00, 0x00, 0x00, 0x00,
119 0x00, 0x00, 0x00, 0x00,
120
121 0x00, 0x00, 0x00, 0x00,
122 0x00, 0x08, 0x00, 0x00,
123};
124
125static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
126 { ICE_MAC_OFOS, 0 },
127 { ICE_ETYPE_OL, 12 },
128 { ICE_IPV4_OFOS, 14 },
129 { ICE_UDP_OF, 34 },
130 { ICE_VXLAN, 42 },
131 { ICE_GENEVE, 42 },
132 { ICE_VXLAN_GPE, 42 },
133 { ICE_MAC_IL, 50 },
134 { ICE_IPV4_IL, 64 },
135 { ICE_TCP_IL, 84 },
136 { ICE_PROTOCOL_LAST, 0 },
137};
138
139static const u8 dummy_udp_tun_tcp_packet[] = {
140 0x00, 0x00, 0x00, 0x00,
141 0x00, 0x00, 0x00, 0x00,
142 0x00, 0x00, 0x00, 0x00,
143
144 0x08, 0x00,
145
146 0x45, 0x00, 0x00, 0x5a,
147 0x00, 0x01, 0x00, 0x00,
148 0x40, 0x11, 0x00, 0x00,
149 0x00, 0x00, 0x00, 0x00,
150 0x00, 0x00, 0x00, 0x00,
151
152 0x00, 0x00, 0x12, 0xb5,
153 0x00, 0x46, 0x00, 0x00,
154
155 0x00, 0x00, 0x65, 0x58,
156 0x00, 0x00, 0x00, 0x00,
157
158 0x00, 0x00, 0x00, 0x00,
159 0x00, 0x00, 0x00, 0x00,
160 0x00, 0x00, 0x00, 0x00,
161 0x08, 0x00,
162
163 0x45, 0x00, 0x00, 0x28,
164 0x00, 0x01, 0x00, 0x00,
165 0x40, 0x06, 0x00, 0x00,
166 0x00, 0x00, 0x00, 0x00,
167 0x00, 0x00, 0x00, 0x00,
168
169 0x00, 0x00, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
171 0x00, 0x00, 0x00, 0x00,
172 0x50, 0x02, 0x20, 0x00,
173 0x00, 0x00, 0x00, 0x00
174};
175
176static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
177 { ICE_MAC_OFOS, 0 },
178 { ICE_ETYPE_OL, 12 },
179 { ICE_IPV4_OFOS, 14 },
180 { ICE_UDP_OF, 34 },
181 { ICE_VXLAN, 42 },
182 { ICE_GENEVE, 42 },
183 { ICE_VXLAN_GPE, 42 },
184 { ICE_MAC_IL, 50 },
185 { ICE_IPV4_IL, 64 },
186 { ICE_UDP_ILOS, 84 },
187 { ICE_PROTOCOL_LAST, 0 },
188};
189
190static const u8 dummy_udp_tun_udp_packet[] = {
191 0x00, 0x00, 0x00, 0x00,
192 0x00, 0x00, 0x00, 0x00,
193 0x00, 0x00, 0x00, 0x00,
194
195 0x08, 0x00,
196
197 0x45, 0x00, 0x00, 0x4e,
198 0x00, 0x01, 0x00, 0x00,
199 0x00, 0x11, 0x00, 0x00,
200 0x00, 0x00, 0x00, 0x00,
201 0x00, 0x00, 0x00, 0x00,
202
203 0x00, 0x00, 0x12, 0xb5,
204 0x00, 0x3a, 0x00, 0x00,
205
206 0x00, 0x00, 0x65, 0x58,
207 0x00, 0x00, 0x00, 0x00,
208
209 0x00, 0x00, 0x00, 0x00,
210 0x00, 0x00, 0x00, 0x00,
211 0x00, 0x00, 0x00, 0x00,
212 0x08, 0x00,
213
214 0x45, 0x00, 0x00, 0x1c,
215 0x00, 0x01, 0x00, 0x00,
216 0x00, 0x11, 0x00, 0x00,
217 0x00, 0x00, 0x00, 0x00,
218 0x00, 0x00, 0x00, 0x00,
219
220 0x00, 0x00, 0x00, 0x00,
221 0x00, 0x08, 0x00, 0x00,
222};
223
224
225static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
226 { ICE_MAC_OFOS, 0 },
227 { ICE_ETYPE_OL, 12 },
228 { ICE_IPV4_OFOS, 14 },
229 { ICE_UDP_ILOS, 34 },
230 { ICE_PROTOCOL_LAST, 0 },
231};
232
233
234static const u8 dummy_udp_packet[] = {
235 0x00, 0x00, 0x00, 0x00,
236 0x00, 0x00, 0x00, 0x00,
237 0x00, 0x00, 0x00, 0x00,
238
239 0x08, 0x00,
240
241 0x45, 0x00, 0x00, 0x1c,
242 0x00, 0x01, 0x00, 0x00,
243 0x00, 0x11, 0x00, 0x00,
244 0x00, 0x00, 0x00, 0x00,
245 0x00, 0x00, 0x00, 0x00,
246
247 0x00, 0x00, 0x00, 0x00,
248 0x00, 0x08, 0x00, 0x00,
249
250 0x00, 0x00,
251};
252
253
254static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
255 { ICE_MAC_OFOS, 0 },
256 { ICE_VLAN_OFOS, 12 },
257 { ICE_ETYPE_OL, 16 },
258 { ICE_IPV4_OFOS, 18 },
259 { ICE_UDP_ILOS, 38 },
260 { ICE_PROTOCOL_LAST, 0 },
261};
262
263
264static const u8 dummy_vlan_udp_packet[] = {
265 0x00, 0x00, 0x00, 0x00,
266 0x00, 0x00, 0x00, 0x00,
267 0x00, 0x00, 0x00, 0x00,
268
269 0x81, 0x00, 0x00, 0x00,
270
271 0x08, 0x00,
272
273 0x45, 0x00, 0x00, 0x1c,
274 0x00, 0x01, 0x00, 0x00,
275 0x00, 0x11, 0x00, 0x00,
276 0x00, 0x00, 0x00, 0x00,
277 0x00, 0x00, 0x00, 0x00,
278
279 0x00, 0x00, 0x00, 0x00,
280 0x00, 0x08, 0x00, 0x00,
281
282 0x00, 0x00,
283};
284
285
286static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
287 { ICE_MAC_OFOS, 0 },
288 { ICE_ETYPE_OL, 12 },
289 { ICE_IPV4_OFOS, 14 },
290 { ICE_TCP_IL, 34 },
291 { ICE_PROTOCOL_LAST, 0 },
292};
293
294
295static const u8 dummy_tcp_packet[] = {
296 0x00, 0x00, 0x00, 0x00,
297 0x00, 0x00, 0x00, 0x00,
298 0x00, 0x00, 0x00, 0x00,
299
300 0x08, 0x00,
301
302 0x45, 0x00, 0x00, 0x28,
303 0x00, 0x01, 0x00, 0x00,
304 0x00, 0x06, 0x00, 0x00,
305 0x00, 0x00, 0x00, 0x00,
306 0x00, 0x00, 0x00, 0x00,
307
308 0x00, 0x00, 0x00, 0x00,
309 0x00, 0x00, 0x00, 0x00,
310 0x00, 0x00, 0x00, 0x00,
311 0x50, 0x00, 0x00, 0x00,
312 0x00, 0x00, 0x00, 0x00,
313
314 0x00, 0x00,
315};
316
317
318static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
319 { ICE_MAC_OFOS, 0 },
320 { ICE_VLAN_OFOS, 12 },
321 { ICE_ETYPE_OL, 16 },
322 { ICE_IPV4_OFOS, 18 },
323 { ICE_TCP_IL, 38 },
324 { ICE_PROTOCOL_LAST, 0 },
325};
326
327
328static const u8 dummy_vlan_tcp_packet[] = {
329 0x00, 0x00, 0x00, 0x00,
330 0x00, 0x00, 0x00, 0x00,
331 0x00, 0x00, 0x00, 0x00,
332
333 0x81, 0x00, 0x00, 0x00,
334
335 0x08, 0x00,
336
337 0x45, 0x00, 0x00, 0x28,
338 0x00, 0x01, 0x00, 0x00,
339 0x00, 0x06, 0x00, 0x00,
340 0x00, 0x00, 0x00, 0x00,
341 0x00, 0x00, 0x00, 0x00,
342
343 0x00, 0x00, 0x00, 0x00,
344 0x00, 0x00, 0x00, 0x00,
345 0x00, 0x00, 0x00, 0x00,
346 0x50, 0x00, 0x00, 0x00,
347 0x00, 0x00, 0x00, 0x00,
348
349 0x00, 0x00,
350};
351
352static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
353 { ICE_MAC_OFOS, 0 },
354 { ICE_ETYPE_OL, 12 },
355 { ICE_IPV6_OFOS, 14 },
356 { ICE_TCP_IL, 54 },
357 { ICE_PROTOCOL_LAST, 0 },
358};
359
360static const u8 dummy_tcp_ipv6_packet[] = {
361 0x00, 0x00, 0x00, 0x00,
362 0x00, 0x00, 0x00, 0x00,
363 0x00, 0x00, 0x00, 0x00,
364
365 0x86, 0xDD,
366
367 0x60, 0x00, 0x00, 0x00,
368 0x00, 0x14, 0x06, 0x00,
369 0x00, 0x00, 0x00, 0x00,
370 0x00, 0x00, 0x00, 0x00,
371 0x00, 0x00, 0x00, 0x00,
372 0x00, 0x00, 0x00, 0x00,
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377
378 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00,
380 0x00, 0x00, 0x00, 0x00,
381 0x50, 0x00, 0x00, 0x00,
382 0x00, 0x00, 0x00, 0x00,
383
384 0x00, 0x00,
385};
386
387
388static const struct ice_dummy_pkt_offsets
389dummy_vlan_tcp_ipv6_packet_offsets[] = {
390 { ICE_MAC_OFOS, 0 },
391 { ICE_VLAN_OFOS, 12 },
392 { ICE_ETYPE_OL, 16 },
393 { ICE_IPV6_OFOS, 18 },
394 { ICE_TCP_IL, 58 },
395 { ICE_PROTOCOL_LAST, 0 },
396};
397
398
399static const u8 dummy_vlan_tcp_ipv6_packet[] = {
400 0x00, 0x00, 0x00, 0x00,
401 0x00, 0x00, 0x00, 0x00,
402 0x00, 0x00, 0x00, 0x00,
403
404 0x81, 0x00, 0x00, 0x00,
405
406 0x86, 0xDD,
407
408 0x60, 0x00, 0x00, 0x00,
409 0x00, 0x14, 0x06, 0x00,
410 0x00, 0x00, 0x00, 0x00,
411 0x00, 0x00, 0x00, 0x00,
412 0x00, 0x00, 0x00, 0x00,
413 0x00, 0x00, 0x00, 0x00,
414 0x00, 0x00, 0x00, 0x00,
415 0x00, 0x00, 0x00, 0x00,
416 0x00, 0x00, 0x00, 0x00,
417 0x00, 0x00, 0x00, 0x00,
418
419 0x00, 0x00, 0x00, 0x00,
420 0x00, 0x00, 0x00, 0x00,
421 0x00, 0x00, 0x00, 0x00,
422 0x50, 0x00, 0x00, 0x00,
423 0x00, 0x00, 0x00, 0x00,
424
425 0x00, 0x00,
426};
427
428
429static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
430 { ICE_MAC_OFOS, 0 },
431 { ICE_ETYPE_OL, 12 },
432 { ICE_IPV6_OFOS, 14 },
433 { ICE_UDP_ILOS, 54 },
434 { ICE_PROTOCOL_LAST, 0 },
435};
436
437
438static const u8 dummy_udp_ipv6_packet[] = {
439 0x00, 0x00, 0x00, 0x00,
440 0x00, 0x00, 0x00, 0x00,
441 0x00, 0x00, 0x00, 0x00,
442
443 0x86, 0xDD,
444
445 0x60, 0x00, 0x00, 0x00,
446 0x00, 0x10, 0x11, 0x00,
447 0x00, 0x00, 0x00, 0x00,
448 0x00, 0x00, 0x00, 0x00,
449 0x00, 0x00, 0x00, 0x00,
450 0x00, 0x00, 0x00, 0x00,
451 0x00, 0x00, 0x00, 0x00,
452 0x00, 0x00, 0x00, 0x00,
453 0x00, 0x00, 0x00, 0x00,
454 0x00, 0x00, 0x00, 0x00,
455
456 0x00, 0x00, 0x00, 0x00,
457 0x00, 0x10, 0x00, 0x00,
458
459 0x00, 0x00, 0x00, 0x00,
460 0x00, 0x00, 0x00, 0x00,
461
462 0x00, 0x00,
463};
464
465
466static const struct ice_dummy_pkt_offsets
467dummy_vlan_udp_ipv6_packet_offsets[] = {
468 { ICE_MAC_OFOS, 0 },
469 { ICE_VLAN_OFOS, 12 },
470 { ICE_ETYPE_OL, 16 },
471 { ICE_IPV6_OFOS, 18 },
472 { ICE_UDP_ILOS, 58 },
473 { ICE_PROTOCOL_LAST, 0 },
474};
475
476
477static const u8 dummy_vlan_udp_ipv6_packet[] = {
478 0x00, 0x00, 0x00, 0x00,
479 0x00, 0x00, 0x00, 0x00,
480 0x00, 0x00, 0x00, 0x00,
481
482 0x81, 0x00, 0x00, 0x00,
483
484 0x86, 0xDD,
485
486 0x60, 0x00, 0x00, 0x00,
487 0x00, 0x08, 0x11, 0x00,
488 0x00, 0x00, 0x00, 0x00,
489 0x00, 0x00, 0x00, 0x00,
490 0x00, 0x00, 0x00, 0x00,
491 0x00, 0x00, 0x00, 0x00,
492 0x00, 0x00, 0x00, 0x00,
493 0x00, 0x00, 0x00, 0x00,
494 0x00, 0x00, 0x00, 0x00,
495 0x00, 0x00, 0x00, 0x00,
496
497 0x00, 0x00, 0x00, 0x00,
498 0x00, 0x08, 0x00, 0x00,
499
500 0x00, 0x00,
501};
502
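/* Buffer sizes (in bytes) for the switch rule AQ commands: an Rx/Tx lookup
 * rule carrying the dummy ethernet header, a lookup rule with no header,
 * a large-action rule with n actions, and a VSI list rule with n VSIs.
 */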
503#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
504 (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
505 (DUMMY_ETH_HDR_LEN * \
506 sizeof(((struct ice_sw_rule_lkup_rx_tx *)0)->hdr[0])))
507#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
508 (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
509#define ICE_SW_RULE_LG_ACT_SIZE(n) \
510 (offsetof(struct ice_aqc_sw_rules_elem, pdata.lg_act.act) + \
511 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act[0])))
512#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
513 (offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \
514 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0])))
515
516
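/* this is a recipe to profile association bitmap */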
517static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
518 ICE_MAX_NUM_PROFILES);
519
520
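/* this is a profile to recipe association bitmap */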
521static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
522 ICE_MAX_NUM_RECIPES);
523
524
525
526
527
528
529
530
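/**
 * ice_init_def_sw_recp - initialize the recipe book keeping tables
 * @hw: pointer to the HW struct
 *
 * Allocate memory for the entire recipe table and initialize the structures/
 * entries corresponding to basic recipes.
 */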
531int ice_init_def_sw_recp(struct ice_hw *hw)
532{
533 struct ice_sw_recipe *recps;
534 u8 i;
535
536 recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
537 sizeof(*recps), GFP_KERNEL);
538 if (!recps)
539 return -ENOMEM;
540
541 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
542 recps[i].root_rid = i;
543 INIT_LIST_HEAD(&recps[i].filt_rules);
544 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
545 INIT_LIST_HEAD(&recps[i].rg_list);
546 mutex_init(&recps[i].filt_rule_lock);
547 }
548
549 hw->switch_info->recp_list = recps;
550
551 return 0;
552}
578
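/**
 * ice_aq_get_sw_cfg - get switch configuration (0x0200)
 * @hw: pointer to the hardware structure
 * @buf: pointer to the result buffer
 * @buf_size: length of the buffer available for response
 * @req_desc: pointer to requested descriptor
 * @num_elems: pointer to number of elements
 * @cd: pointer to command details structure or NULL
 *
 * Get switch configuration to be placed in buf. *req_desc is both an input
 * and an output: the caller starts with 0 and, while firmware returns a
 * non-zero value, calls again with that value to fetch the next block.
 * *num_elems is output only and reflects the number of elements in buf.
 */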
579static int
580ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
581 u16 buf_size, u16 *req_desc, u16 *num_elems,
582 struct ice_sq_cd *cd)
583{
584 struct ice_aqc_get_sw_cfg *cmd;
585 struct ice_aq_desc desc;
586 int status;
587
588 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
589 cmd = &desc.params.get_sw_conf;
590 cmd->element = cpu_to_le16(*req_desc);
591
592 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
593 if (!status) {
594 *req_desc = le16_to_cpu(cmd->element);
595 *num_elems = le16_to_cpu(cmd->num_elems);
596 }
597
598 return status;
599}
600
601
602
603
604
605
606
607
608
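/**
 * ice_aq_add_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware (0x0210)
 */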
609static int
610ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
611 struct ice_sq_cd *cd)
612{
613 struct ice_aqc_add_update_free_vsi_resp *res;
614 struct ice_aqc_add_get_update_free_vsi *cmd;
615 struct ice_aq_desc desc;
616 int status;
617
618 cmd = &desc.params.vsi_cmd;
619 res = &desc.params.add_update_free_vsi_res;
620
621 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
622
623 if (!vsi_ctx->alloc_from_pool)
624 cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
625 ICE_AQ_VSI_IS_VALID);
626 cmd->vf_id = vsi_ctx->vf_num;
627
628 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
629
630 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
631
632 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
633 sizeof(vsi_ctx->info), cd);
634
635 if (!status) {
636 vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
637 vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
638 vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
639 }
640
641 return status;
642}
643
644
645
646
647
648
649
650
651
652
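/**
 * ice_aq_free_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware (0x0213)
 */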
653static int
654ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
655 bool keep_vsi_alloc, struct ice_sq_cd *cd)
656{
657 struct ice_aqc_add_update_free_vsi_resp *resp;
658 struct ice_aqc_add_get_update_free_vsi *cmd;
659 struct ice_aq_desc desc;
660 int status;
661
662 cmd = &desc.params.vsi_cmd;
663 resp = &desc.params.add_update_free_vsi_res;
664
665 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
666
667 cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
668 if (keep_vsi_alloc)
669 cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);
670
671 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
672 if (!status) {
673 vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
674 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
675 }
676
677 return status;
678}
679
680
681
682
683
684
685
686
687
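/**
 * ice_aq_update_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware (0x0211)
 */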
688static int
689ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
690 struct ice_sq_cd *cd)
691{
692 struct ice_aqc_add_update_free_vsi_resp *resp;
693 struct ice_aqc_add_get_update_free_vsi *cmd;
694 struct ice_aq_desc desc;
695 int status;
696
697 cmd = &desc.params.vsi_cmd;
698 resp = &desc.params.add_update_free_vsi_res;
699
700 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
701
702 cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
703
704 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
705
706 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
707 sizeof(vsi_ctx->info), cd);
708
709 if (!status) {
710 vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
711 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
712 }
713
714 return status;
715}
716
717
718
719
720
721
722
723
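/**
 * ice_is_vsi_valid - check whether the VSI is valid or not
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 */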
724bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
725{
726 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
727}
728
729
730
731
732
733
734
735
736
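/**
 * ice_get_hw_vsi_num - return the HW VSI number
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
 */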
737u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
738{
739 return hw->vsi_ctx[vsi_handle]->vsi_num;
740}
741
742
743
744
745
746
747
748
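/**
 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 */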
749struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
750{
751 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
752}
753
754
755
756
757
758
759
760
761
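/**
 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @vsi: VSI context pointer
 */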
762static void
763ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
764{
765 hw->vsi_ctx[vsi_handle] = vsi;
766}
767
768
769
770
771
772
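/**
 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 */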
773static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
774{
775 struct ice_vsi_ctx *vsi;
776 u8 i;
777
778 vsi = ice_get_vsi_ctx(hw, vsi_handle);
779 if (!vsi)
780 return;
781 ice_for_each_traffic_class(i) {
782 if (vsi->lan_q_ctx[i]) {
783 devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
784 vsi->lan_q_ctx[i] = NULL;
785 }
786 if (vsi->rdma_q_ctx[i]) {
787 devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
788 vsi->rdma_q_ctx[i] = NULL;
789 }
790 }
791}
792
793
794
795
796
797
798
799
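/**
 * ice_clear_vsi_ctx - clear the VSI context entry
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 */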
800static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
801{
802 struct ice_vsi_ctx *vsi;
803
804 vsi = ice_get_vsi_ctx(hw, vsi_handle);
805 if (vsi) {
806 ice_clear_vsi_q_ctx(hw, vsi_handle);
807 devm_kfree(ice_hw_to_dev(hw), vsi);
808 hw->vsi_ctx[vsi_handle] = NULL;
809 }
810}
811
812
813
814
815
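/**
 * ice_clear_all_vsi_ctx - clear all the VSI context entries
 * @hw: pointer to the HW struct
 */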
816void ice_clear_all_vsi_ctx(struct ice_hw *hw)
817{
818 u16 i;
819
820 for (i = 0; i < ICE_MAX_VSI; i++)
821 ice_clear_vsi_ctx(hw, i);
822}
823
824
825
826
827
828
829
830
831
832
833
834
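/**
 * ice_add_vsi - add VSI context to the hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle provided by drivers
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware and also add it into the VSI handle list.
 * If this function gets called after reset for an existing VSI, only the
 * new HW VSI number is updated in the corresponding VSI handle list entry.
 */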
835int
836ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
837 struct ice_sq_cd *cd)
838{
839 struct ice_vsi_ctx *tmp_vsi_ctx;
840 int status;
841
842 if (vsi_handle >= ICE_MAX_VSI)
843 return -EINVAL;
844 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
845 if (status)
846 return status;
847 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
848 if (!tmp_vsi_ctx) {
849
850 tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
851 sizeof(*tmp_vsi_ctx), GFP_KERNEL);
852 if (!tmp_vsi_ctx) {
853 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
854 return -ENOMEM;
855 }
856 *tmp_vsi_ctx = *vsi_ctx;
857 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
858 } else {
859
860 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
861 }
862
863 return 0;
864}
865
866
867
868
869
870
871
872
873
874
875
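/**
 * ice_free_vsi - free VSI context from hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 */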
876int
877ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
878 bool keep_vsi_alloc, struct ice_sq_cd *cd)
879{
880 int status;
881
882 if (!ice_is_vsi_valid(hw, vsi_handle))
883 return -EINVAL;
884 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
885 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
886 if (!status)
887 ice_clear_vsi_ctx(hw, vsi_handle);
888 return status;
889}
890
891
892
893
894
895
896
897
898
899
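/**
 * ice_update_vsi
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware
 */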
900int
901ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
902 struct ice_sq_cd *cd)
903{
904 if (!ice_is_vsi_valid(hw, vsi_handle))
905 return -EINVAL;
906 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
907 return ice_aq_update_vsi(hw, vsi_ctx, cd);
908}
909
910
911
912
913
914
915
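/**
 * ice_cfg_rdma_fltr - enable/disable RDMA filtering on VSI
 * @hw: pointer to HW struct
 * @vsi_handle: VSI SW index
 * @enable: boolean for enable/disable
 */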
916int
917ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
918{
919 struct ice_vsi_ctx *ctx;
920
921 ctx = ice_get_vsi_ctx(hw, vsi_handle);
922 if (!ctx)
923 return -EIO;
924
925 if (enable)
926 ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
927 else
928 ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
929
930 return ice_update_vsi(hw, vsi_handle, ctx, NULL);
931}
932
933
934
935
936
937
938
939
940
941
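/**
 * ice_aq_alloc_free_vsi_list
 * @hw: pointer to the HW struct
 * @vsi_list_id: VSI list ID returned or used for lookup
 * @lkup_type: switch rule filter lookup type
 * @opc: switch rules population command type - pass in the command opcode
 *
 * allocates or free a VSI list resource
 */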
942static int
943ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
944 enum ice_sw_lkup_type lkup_type,
945 enum ice_adminq_opc opc)
946{
947 struct ice_aqc_alloc_free_res_elem *sw_buf;
948 struct ice_aqc_res_elem *vsi_ele;
949 u16 buf_len;
950 int status;
951
952 buf_len = struct_size(sw_buf, elem, 1);
953 sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
954 if (!sw_buf)
955 return -ENOMEM;
956 sw_buf->num_elems = cpu_to_le16(1);
957
958 if (lkup_type == ICE_SW_LKUP_MAC ||
959 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
960 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
961 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
962 lkup_type == ICE_SW_LKUP_PROMISC ||
963 lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
964 sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
965 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
966 sw_buf->res_type =
967 cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
968 } else {
969 status = -EINVAL;
970 goto ice_aq_alloc_free_vsi_list_exit;
971 }
972
973 if (opc == ice_aqc_opc_free_res)
974 sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
975
976 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
977 if (status)
978 goto ice_aq_alloc_free_vsi_list_exit;
979
980 if (opc == ice_aqc_opc_alloc_res) {
981 vsi_ele = &sw_buf->elem[0];
982 *vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
983 }
984
985ice_aq_alloc_free_vsi_list_exit:
986 devm_kfree(ice_hw_to_dev(hw), sw_buf);
987 return status;
988}
989
990
991
992
993
994
995
996
997
998
999
1000
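/**
 * ice_aq_sw_rules - add/update/remove switch rules
 * @hw: pointer to the HW struct
 * @rule_list: pointer to switch rule population list
 * @rule_list_sz: total size of the rule list in bytes
 * @num_rules: number of switch rules in the rule_list
 * @opc: switch rules population command type - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
 */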
1001int
1002ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1003 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1004{
1005 struct ice_aq_desc desc;
1006 int status;
1007
1008 if (opc != ice_aqc_opc_add_sw_rules &&
1009 opc != ice_aqc_opc_update_sw_rules &&
1010 opc != ice_aqc_opc_remove_sw_rules)
1011 return -EINVAL;
1012
1013 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1014
1015 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1016 desc.params.sw_rules.num_rules_fltr_entry_index =
1017 cpu_to_le16(num_rules);
1018 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1019 if (opc != ice_aqc_opc_add_sw_rules &&
1020 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
1021 status = -ENOENT;
1022
1023 return status;
1024}
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
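/**
 * ice_aq_add_recipe - add switch recipe
 * @hw: pointer to the HW struct
 * @s_recipe_list: pointer to switch rule population list
 * @num_recipes: number of switch recipes in the list
 * @cd: pointer to command details structure or NULL
 *
 * Add(0x0290)
 */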
1035static int
1036ice_aq_add_recipe(struct ice_hw *hw,
1037 struct ice_aqc_recipe_data_elem *s_recipe_list,
1038 u16 num_recipes, struct ice_sq_cd *cd)
1039{
1040 struct ice_aqc_add_get_recipe *cmd;
1041 struct ice_aq_desc desc;
1042 u16 buf_size;
1043
1044 cmd = &desc.params.add_get_recipe;
1045 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1046
1047 cmd->num_sub_recipes = cpu_to_le16(num_recipes);
1048 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1049
1050 buf_size = num_recipes * sizeof(*s_recipe_list);
1051
1052 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1053}
1071
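/**
 * ice_aq_get_recipe - get switch recipe
 * @hw: pointer to the HW struct
 * @s_recipe_list: pointer to switch rule population list
 * @num_recipes: pointer to the number of recipes (input and output)
 * @recipe_root: root recipe number of recipe(s) to retrieve
 * @cd: pointer to command details structure or NULL
 *
 * Get(0x0292)
 *
 * The caller must supply enough space in s_recipe_list to hold all possible
 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES on input. On
 * output, *num_recipes equals the number of entries returned.
 */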
1072static int
1073ice_aq_get_recipe(struct ice_hw *hw,
1074 struct ice_aqc_recipe_data_elem *s_recipe_list,
1075 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1076{
1077 struct ice_aqc_add_get_recipe *cmd;
1078 struct ice_aq_desc desc;
1079 u16 buf_size;
1080 int status;
1081
1082 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1083 return -EINVAL;
1084
1085 cmd = &desc.params.add_get_recipe;
1086 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1087
1088 cmd->return_index = cpu_to_le16(recipe_root);
1089 cmd->num_sub_recipes = 0;
1090
1091 buf_size = *num_recipes * sizeof(*s_recipe_list);
1092
1093 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1094 *num_recipes = le16_to_cpu(cmd->num_sub_recipes);
1095
1096 return status;
1097}
1098
1099
1100
1101
1102
1103
1104
1105
1106
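/**
 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
 * @hw: pointer to the HW struct
 * @profile_id: package profile ID to associate the recipe with
 * @r_bitmap: recipe bitmap to associate with the profile
 * @cd: pointer to command details structure or NULL
 * Recipe to profile association (0x0291)
 */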
1107static int
1108ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1109 struct ice_sq_cd *cd)
1110{
1111 struct ice_aqc_recipe_to_profile *cmd;
1112 struct ice_aq_desc desc;
1113
1114 cmd = &desc.params.recipe_to_profile;
1115 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1116 cmd->profile_id = cpu_to_le16(profile_id);
1117
1118
1119
1120 memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));
1121
1122 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1123}
1124
1125
1126
1127
1128
1129
1130
1131
1132
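/**
 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
 * @hw: pointer to the HW struct
 * @profile_id: package profile ID to associate the recipe with
 * @r_bitmap: recipe bitmap filled in as the response
 * @cd: pointer to command details structure or NULL
 * Associate profile ID with given recipe (0x0293)
 */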
1133static int
1134ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1135 struct ice_sq_cd *cd)
1136{
1137 struct ice_aqc_recipe_to_profile *cmd;
1138 struct ice_aq_desc desc;
1139 int status;
1140
1141 cmd = &desc.params.recipe_to_profile;
1142 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1143 cmd->profile_id = cpu_to_le16(profile_id);
1144
1145 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1146 if (!status)
1147 memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));
1148
1149 return status;
1150}
1151
1152
1153
1154
1155
1156
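/**
 * ice_alloc_recipe - add recipe resource
 * @hw: pointer to the hardware structure
 * @rid: recipe ID returned as response to AQ call
 */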
1157static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1158{
1159 struct ice_aqc_alloc_free_res_elem *sw_buf;
1160 u16 buf_len;
1161 int status;
1162
1163 buf_len = struct_size(sw_buf, elem, 1);
1164 sw_buf = kzalloc(buf_len, GFP_KERNEL);
1165 if (!sw_buf)
1166 return -ENOMEM;
1167
1168 sw_buf->num_elems = cpu_to_le16(1);
1169 sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
1170 ICE_AQC_RES_TYPE_S) |
1171 ICE_AQC_RES_TYPE_FLAG_SHARED);
1172 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1173 ice_aqc_opc_alloc_res, NULL);
1174 if (!status)
1175 *rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
1176 kfree(sw_buf);
1177
1178 return status;
1179}
1180
1181
1182
1183
1184
1185
1186
1187
1188
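/**
 * ice_get_recp_to_prof_map - updates recipe to profile mapping
 * @hw: pointer to hardware structure
 *
 * This function is used to populate the recipe_to_profile matrix where the
 * index to this array is the recipe ID and the element is the mapping of
 * which profiles this recipe is associated with.
 */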
1189static void ice_get_recp_to_prof_map(struct ice_hw *hw)
1190{
1191 DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
1192 u16 i;
1193
1194 for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
1195 u16 j;
1196
1197 bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
1198 bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
1199 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
1200 continue;
1201 bitmap_copy(profile_to_recipe[i], r_bitmap,
1202 ICE_MAX_NUM_RECIPES);
1203 for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
1204 set_bit(i, recipe_to_profile[j]);
1205 }
1206}
1207
1208
1209
1210
1211
1212
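/**
 * ice_collect_result_idx - copy result index values
 * @buf: buffer that contains the result index
 * @recp: the recipe struct to copy data into
 */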
1213static void
1214ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1215 struct ice_sw_recipe *recp)
1216{
1217 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1218 set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
1219 recp->res_idxs);
1220}
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
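/**
 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
 * @hw: pointer to hardware structure
 * @recps: struct that we need to populate
 * @rid: recipe ID that we are populating
 * @refresh_required: true if we should get recipe to profile mapping from FW
 *
 * This function is used to populate all the necessary entries into our
 * bookkeeping so that we have a current list of all the recipes that are
 * programmed in the firmware.
 */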
1233static int
1234ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
1235 bool *refresh_required)
1236{
1237 DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS);
1238 struct ice_aqc_recipe_data_elem *tmp;
1239 u16 num_recps = ICE_MAX_NUM_RECIPES;
1240 struct ice_prot_lkup_ext *lkup_exts;
1241 u8 fv_word_idx = 0;
1242 u16 sub_recps;
1243 int status;
1244
1245 bitmap_zero(result_bm, ICE_MAX_FV_WORDS);
1246
1247
1248 tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
1249 if (!tmp)
1250 return -ENOMEM;
1251
1252 tmp[0].recipe_indx = rid;
1253 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
1254
1255 if (status)
1256 goto err_unroll;
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266 if (*refresh_required) {
1267 ice_get_recp_to_prof_map(hw);
1268 *refresh_required = false;
1269 }
1270
1271
1272
1273
1274
1275 lkup_exts = &recps[rid].lkup_exts;
1276
1277 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
1278 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
1279 struct ice_recp_grp_entry *rg_entry;
1280 u8 i, prof, idx, prot = 0;
1281 bool is_root;
1282 u16 off = 0;
1283
1284 rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
1285 GFP_KERNEL);
1286 if (!rg_entry) {
1287 status = -ENOMEM;
1288 goto err_unroll;
1289 }
1290
1291 idx = root_bufs.recipe_indx;
1292 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
1293
1294
1295 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1296 set_bit(root_bufs.content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
1297 result_bm);
1298
1299
1300 prof = find_first_bit(recipe_to_profile[idx],
1301 ICE_MAX_NUM_PROFILES);
1302 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
1303 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
1304
1305 rg_entry->fv_idx[i] = lkup_indx;
1306 rg_entry->fv_mask[i] =
1307 le16_to_cpu(root_bufs.content.mask[i + 1]);
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318 if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) ||
1319 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
1320 rg_entry->fv_idx[i] == 0)
1321 continue;
1322
1323 ice_find_prot_off(hw, ICE_BLK_SW, prof,
1324 rg_entry->fv_idx[i], &prot, &off);
1325 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
1326 lkup_exts->fv_words[fv_word_idx].off = off;
1327 lkup_exts->field_mask[fv_word_idx] =
1328 rg_entry->fv_mask[i];
1329 fv_word_idx++;
1330 }
1331
1332
1333
1334 list_add(&rg_entry->l_entry, &recps[rid].rg_list);
1335
1336
1337 recps[idx].is_root = !!is_root;
1338 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1339 bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
1340 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
1341 recps[idx].chain_idx = root_bufs.content.result_indx &
1342 ~ICE_AQ_RECIPE_RESULT_EN;
1343 set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
1344 } else {
1345 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
1346 }
1347
1348 if (!is_root)
1349 continue;
1350
1351
1352 memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
1353 sizeof(recps[idx].r_bitmap));
1354 recps[idx].root_rid = root_bufs.content.rid &
1355 ~ICE_AQ_RECIPE_ID_IS_ROOT;
1356 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1357 }
1358
1359
1360 lkup_exts->n_val_words = fv_word_idx;
1361 recps[rid].big_recp = (num_recps > 1);
1362 recps[rid].n_grp_count = (u8)num_recps;
1363 recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp,
1364 recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
1365 GFP_KERNEL);
1366 if (!recps[rid].root_buf) {
1367 status = -ENOMEM;
1368 goto err_unroll;
1369 }
1370
1371
1372 bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
1373 recps[rid].recp_created = true;
1374
1375err_unroll:
1376 kfree(tmp);
1377 return status;
1378}
1379
1380
1381
1382
1383
1384
1385
1386
1387
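/**
 * ice_init_port_info - Initialize port_info with switch configuration data
 * @pi: pointer to port_info
 * @vsi_port_num: VSI number or port number
 * @type: Type of switch element (port or VSI)
 * @swid: switch ID of the switch the element is attached to
 * @pf_vf_num: PF or VF number
 * @is_vf: true if the element is a VF, false otherwise
 */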
1388static void
1389ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1390 u16 swid, u16 pf_vf_num, bool is_vf)
1391{
1392 switch (type) {
1393 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1394 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1395 pi->sw_id = swid;
1396 pi->pf_vf_num = pf_vf_num;
1397 pi->is_vf = is_vf;
1398 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1399 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1400 break;
1401 default:
1402 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
1403 break;
1404 }
1405}
1406
1407
1408
1409
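/**
 * ice_get_initial_sw_cfg - Get initial port and default VSI data
 * @hw: pointer to the hardware structure
 */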
1410int ice_get_initial_sw_cfg(struct ice_hw *hw)
1411{
1412 struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
1413 u16 req_desc = 0;
1414 u16 num_elems;
1415 int status;
1416 u16 i;
1417
1418 rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
1419 GFP_KERNEL);
1420
1421 if (!rbuf)
1422 return -ENOMEM;
1423
1424
1425
1426
1427
1428
1429 do {
1430 struct ice_aqc_get_sw_cfg_resp_elem *ele;
1431
1432 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1433 &req_desc, &num_elems, NULL);
1434
1435 if (status)
1436 break;
1437
1438 for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
1439 u16 pf_vf_num, swid, vsi_port_num;
1440 bool is_vf = false;
1441 u8 res_type;
1442
1443 vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
1444 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1445
1446 pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
1447 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1448
1449 swid = le16_to_cpu(ele->swid);
1450
1451 if (le16_to_cpu(ele->pf_vf_num) &
1452 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
1453 is_vf = true;
1454
1455 res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >>
1456 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
1457
1458 if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
1459
1460 continue;
1461 }
1462
1463 ice_init_port_info(hw->port_info, vsi_port_num,
1464 res_type, swid, pf_vf_num, is_vf);
1465 }
1466 } while (req_desc && !status);
1467
1468 devm_kfree(ice_hw_to_dev(hw), rbuf);
1469 return status;
1470}
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
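/**
 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
 * @hw: pointer to the hardware structure
 * @fi: filter info to fill/update
 *
 * This helper function populates the lb_en and lan_en elements of the provided
 * ice_fltr_info struct using the switch's type and characteristics of the
 * switch rule being configured.
 */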
1481static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
1482{
1483 fi->lb_en = false;
1484 fi->lan_en = false;
1485 if ((fi->flag & ICE_FLTR_TX) &&
1486 (fi->fltr_act == ICE_FWD_TO_VSI ||
1487 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1488 fi->fltr_act == ICE_FWD_TO_Q ||
1489 fi->fltr_act == ICE_FWD_TO_QGRP)) {
1490
1491
1492
1493 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1494 fi->lb_en = true;
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512 if (hw->evb_veb) {
1513 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1514 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1515 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1516 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1517 fi->lkup_type == ICE_SW_LKUP_DFLT ||
1518 fi->lkup_type == ICE_SW_LKUP_VLAN ||
1519 (fi->lkup_type == ICE_SW_LKUP_MAC &&
1520 !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
1521 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1522 !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
1523 fi->lan_en = true;
1524 } else {
1525 fi->lan_en = true;
1526 }
1527 }
1528}
1529
1530
1531
1532
1533
1534
1535
1536
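/**
 * ice_fill_sw_rule - Helper function to fill switch rule structure
 * @hw: pointer to the hardware structure
 * @f_info: entry containing packet forwarding information
 * @s_rule: switch rule structure to be filled in based on f_info
 * @opc: switch rules population command type - pass in the command opcode
 */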
1537static void
1538ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1539 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
1540{
1541 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
1542 void *daddr = NULL;
1543 u16 eth_hdr_sz;
1544 u8 *eth_hdr;
1545 u32 act = 0;
1546 __be16 *off;
1547 u8 q_rgn;
1548
1549 if (opc == ice_aqc_opc_remove_sw_rules) {
1550 s_rule->pdata.lkup_tx_rx.act = 0;
1551 s_rule->pdata.lkup_tx_rx.index =
1552 cpu_to_le16(f_info->fltr_rule_id);
1553 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
1554 return;
1555 }
1556
1557 eth_hdr_sz = sizeof(dummy_eth_header);
1558 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
1559
1560
1561 memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
1562 ice_fill_sw_info(hw, f_info);
1563
1564 switch (f_info->fltr_act) {
1565 case ICE_FWD_TO_VSI:
1566 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
1567 ICE_SINGLE_ACT_VSI_ID_M;
1568 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1569 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1570 ICE_SINGLE_ACT_VALID_BIT;
1571 break;
1572 case ICE_FWD_TO_VSI_LIST:
1573 act |= ICE_SINGLE_ACT_VSI_LIST;
1574 act |= (f_info->fwd_id.vsi_list_id <<
1575 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
1576 ICE_SINGLE_ACT_VSI_LIST_ID_M;
1577 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1578 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1579 ICE_SINGLE_ACT_VALID_BIT;
1580 break;
1581 case ICE_FWD_TO_Q:
1582 act |= ICE_SINGLE_ACT_TO_Q;
1583 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1584 ICE_SINGLE_ACT_Q_INDEX_M;
1585 break;
1586 case ICE_DROP_PACKET:
1587 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
1588 ICE_SINGLE_ACT_VALID_BIT;
1589 break;
1590 case ICE_FWD_TO_QGRP:
1591 q_rgn = f_info->qgrp_size > 0 ?
1592 (u8)ilog2(f_info->qgrp_size) : 0;
1593 act |= ICE_SINGLE_ACT_TO_Q;
1594 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1595 ICE_SINGLE_ACT_Q_INDEX_M;
1596 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
1597 ICE_SINGLE_ACT_Q_REGION_M;
1598 break;
1599 default:
1600 return;
1601 }
1602
1603 if (f_info->lb_en)
1604 act |= ICE_SINGLE_ACT_LB_ENABLE;
1605 if (f_info->lan_en)
1606 act |= ICE_SINGLE_ACT_LAN_ENABLE;
1607
1608 switch (f_info->lkup_type) {
1609 case ICE_SW_LKUP_MAC:
1610 daddr = f_info->l_data.mac.mac_addr;
1611 break;
1612 case ICE_SW_LKUP_VLAN:
1613 vlan_id = f_info->l_data.vlan.vlan_id;
1614 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
1615 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
1616 act |= ICE_SINGLE_ACT_PRUNE;
1617 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
1618 }
1619 break;
1620 case ICE_SW_LKUP_ETHERTYPE_MAC:
1621 daddr = f_info->l_data.ethertype_mac.mac_addr;
1622 fallthrough;
1623 case ICE_SW_LKUP_ETHERTYPE:
1624 off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
1625 *off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
1626 break;
1627 case ICE_SW_LKUP_MAC_VLAN:
1628 daddr = f_info->l_data.mac_vlan.mac_addr;
1629 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1630 break;
1631 case ICE_SW_LKUP_PROMISC_VLAN:
1632 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1633 fallthrough;
1634 case ICE_SW_LKUP_PROMISC:
1635 daddr = f_info->l_data.mac_vlan.mac_addr;
1636 break;
1637 default:
1638 break;
1639 }
1640
1641 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
1642 cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
1643 cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
1644
1645
1646 s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type);
1647 s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src);
1648 s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
1649
1650 if (daddr)
1651 ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);
1652
1653 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
1654 off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
1655 *off = cpu_to_be16(vlan_id);
1656 }
1657
1658
1659 if (opc != ice_aqc_opc_update_sw_rules)
1660 s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz);
1661}
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
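/**
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource ID
 *
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
 */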
1673static int
1674ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
1675 u16 sw_marker, u16 l_id)
1676{
1677 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
1678
1679
1680
1681
1682
1683 const u16 num_lg_acts = 3;
1684 u16 lg_act_size;
1685 u16 rules_size;
1686 int status;
1687 u32 act;
1688 u16 id;
1689
1690 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
1691 return -EINVAL;
1692
1693
1694
1695
1696
1697
1698 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
1699 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
1700 lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
1701 if (!lg_act)
1702 return -ENOMEM;
1703
1704 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
1705
1706
1707 lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
1708 lg_act->pdata.lg_act.index = cpu_to_le16(l_id);
1709 lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts);
1710
1711
1712
1713
1714 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
1715 m_ent->fltr_info.fwd_id.hw_vsi_id;
1716
1717 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
1718 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
1719 if (m_ent->vsi_count > 1)
1720 act |= ICE_LG_ACT_VSI_LIST;
1721 lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);
1722
1723
1724 act = ICE_LG_ACT_GENERIC;
1725
1726 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
1727 lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);
1728
1729 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
1730 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
1731
1732
1733 act |= ICE_LG_ACT_GENERIC;
1734 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
1735 ICE_LG_ACT_GENERIC_VALUE_M;
1736
1737 lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);
1738
1739
1740 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
1741 ice_aqc_opc_update_sw_rules);
1742
1743
1744 rx_tx->pdata.lkup_tx_rx.act =
1745 cpu_to_le32(ICE_SINGLE_ACT_PTR |
1746 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
1747 ICE_SINGLE_ACT_PTR_VAL_M));
1748
1749
1750
1751
1752
1753 rx_tx->pdata.lkup_tx_rx.index =
1754 cpu_to_le16(m_ent->fltr_info.fltr_rule_id);
1755
1756 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
1757 ice_aqc_opc_update_sw_rules, NULL);
1758 if (!status) {
1759 m_ent->lg_act_idx = l_id;
1760 m_ent->sw_marker_id = sw_marker;
1761 }
1762
1763 devm_kfree(ice_hw_to_dev(hw), lg_act);
1764 return status;
1765}
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
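/**
 * ice_create_vsi_list_map
 * @hw: pointer to the hardware structure
 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 *
 * Helper function to create a new entry of VSI list ID to VSI mapping
 * using the given VSI list ID
 */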
1777static struct ice_vsi_list_map_info *
1778ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
1779 u16 vsi_list_id)
1780{
1781 struct ice_switch_info *sw = hw->switch_info;
1782 struct ice_vsi_list_map_info *v_map;
1783 int i;
1784
1785 v_map = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*v_map), GFP_KERNEL);
1786 if (!v_map)
1787 return NULL;
1788
1789 v_map->vsi_list_id = vsi_list_id;
1790 v_map->ref_cnt = 1;
1791 for (i = 0; i < num_vsi; i++)
1792 set_bit(vsi_handle_arr[i], v_map->vsi_map);
1793
1794 list_add(&v_map->list_entry, &sw->vsi_list_map_head);
1795 return v_map;
1796}
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
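/**
 * ice_update_vsi_list_rule
 * @hw: pointer to the hardware structure
 * @vsi_handle_arr: array of VSI handles to form a VSI list
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 * @remove: Boolean value to indicate if this is a remove action
 * @opc: switch rules population command type - pass in the command opcode
 * @lkup_type: lookup type of the filter
 *
 * Call AQ command to add a new switch rule or update existing switch rule
 * using the given VSI list ID
 */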
1811static int
1812ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
1813 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
1814 enum ice_sw_lkup_type lkup_type)
1815{
1816 struct ice_aqc_sw_rules_elem *s_rule;
1817 u16 s_rule_size;
1818 u16 rule_type;
1819 int status;
1820 int i;
1821
1822 if (!num_vsi)
1823 return -EINVAL;
1824
1825 if (lkup_type == ICE_SW_LKUP_MAC ||
1826 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1827 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1828 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1829 lkup_type == ICE_SW_LKUP_PROMISC ||
1830 lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
1831 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
1832 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
1833 else if (lkup_type == ICE_SW_LKUP_VLAN)
1834 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
1835 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
1836 else
1837 return -EINVAL;
1838
1839 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
1840 s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
1841 if (!s_rule)
1842 return -ENOMEM;
1843 for (i = 0; i < num_vsi; i++) {
1844 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
1845 status = -EINVAL;
1846 goto exit;
1847 }
1848
1849 s_rule->pdata.vsi_list.vsi[i] =
1850 cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
1851 }
1852
1853 s_rule->type = cpu_to_le16(rule_type);
1854 s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
1855 s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
1856
1857 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
1858
1859exit:
1860 devm_kfree(ice_hw_to_dev(hw), s_rule);
1861 return status;
1862}
1863
1864
1865
1866
1867
1868
1869
1870
1871
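/**
 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
 * @hw: pointer to the HW struct
 * @vsi_handle_arr: array of VSI handles to form a VSI list
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: stores the ID of the VSI list to be created
 * @lkup_type: switch rule filter's lookup type
 */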
1872static int
1873ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
1874 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
1875{
1876 int status;
1877
1878 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
1879 ice_aqc_opc_alloc_res);
1880 if (status)
1881 return status;
1882
1883
1884 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
1885 *vsi_list_id, false,
1886 ice_aqc_opc_add_sw_rules, lkup_type);
1887}
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
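/**
 * ice_create_pkt_fwd_rule
 * @hw: pointer to the hardware structure
 * @f_entry: entry containing packet forwarding information
 *
 * Create switch rule with given filter information and add an entry
 * to the corresponding filter management list to track this switch rule
 * and VSI mapping
 */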
1898static int
1899ice_create_pkt_fwd_rule(struct ice_hw *hw,
1900 struct ice_fltr_list_entry *f_entry)
1901{
1902 struct ice_fltr_mgmt_list_entry *fm_entry;
1903 struct ice_aqc_sw_rules_elem *s_rule;
1904 enum ice_sw_lkup_type l_type;
1905 struct ice_sw_recipe *recp;
1906 int status;
1907
1908 s_rule = devm_kzalloc(ice_hw_to_dev(hw),
1909 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
1910 if (!s_rule)
1911 return -ENOMEM;
1912 fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
1913 GFP_KERNEL);
1914 if (!fm_entry) {
1915 status = -ENOMEM;
1916 goto ice_create_pkt_fwd_rule_exit;
1917 }
1918
1919 fm_entry->fltr_info = f_entry->fltr_info;
1920
1921
1922 fm_entry->vsi_count = 1;
1923 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
1924 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
1925 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
1926
1927 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
1928 ice_aqc_opc_add_sw_rules);
1929
1930 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
1931 ice_aqc_opc_add_sw_rules, NULL);
1932 if (status) {
1933 devm_kfree(ice_hw_to_dev(hw), fm_entry);
1934 goto ice_create_pkt_fwd_rule_exit;
1935 }
1936
1937 f_entry->fltr_info.fltr_rule_id =
1938 le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
1939 fm_entry->fltr_info.fltr_rule_id =
1940 le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
1941
1942
1943
1944
1945 l_type = fm_entry->fltr_info.lkup_type;
1946 recp = &hw->switch_info->recp_list[l_type];
1947 list_add(&fm_entry->list_entry, &recp->filt_rules);
1948
1949ice_create_pkt_fwd_rule_exit:
1950 devm_kfree(ice_hw_to_dev(hw), s_rule);
1951 return status;
1952}
1953
1954
1955
1956
1957
1958
1959
1960
1961
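/**
 * ice_update_pkt_fwd_rule
 * @hw: pointer to the hardware structure
 * @f_info: filter information for switch rule
 *
 * Call AQ command to update a previously created switch rule with a
 * VSI list ID
 */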
1962static int
1963ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
1964{
1965 struct ice_aqc_sw_rules_elem *s_rule;
1966 int status;
1967
1968 s_rule = devm_kzalloc(ice_hw_to_dev(hw),
1969 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
1970 if (!s_rule)
1971 return -ENOMEM;
1972
1973 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
1974
1975 s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id);
1976
1977
1978 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
1979 ice_aqc_opc_update_sw_rules, NULL);
1980
1981 devm_kfree(ice_hw_to_dev(hw), s_rule);
1982 return status;
1983}
1984
1985
1986
1987
1988
1989
1990
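/**
 * ice_update_sw_rule_bridge_mode
 * @hw: pointer to the HW struct
 *
 * Updates unicast switch filter rules based on VEB/VEPA mode
 */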
1991int ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
1992{
1993 struct ice_switch_info *sw = hw->switch_info;
1994 struct ice_fltr_mgmt_list_entry *fm_entry;
1995 struct list_head *rule_head;
1996 struct mutex *rule_lock;
1997 int status = 0;
1998
1999 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2000 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2001
2002 mutex_lock(rule_lock);
2003 list_for_each_entry(fm_entry, rule_head, list_entry) {
2004 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2005 u8 *addr = fi->l_data.mac.mac_addr;
2006
2007
2008
2009
2010 if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
2011 (fi->fltr_act == ICE_FWD_TO_VSI ||
2012 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2013 fi->fltr_act == ICE_FWD_TO_Q ||
2014 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2015 status = ice_update_pkt_fwd_rule(hw, fi);
2016 if (status)
2017 break;
2018 }
2019 }
2020
2021 mutex_unlock(rule_lock);
2022
2023 return status;
2024}
2046
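/**
 * ice_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.):
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs to this list using
 *		the switch rule command, then update the previously created
 *		switch rule with the newly created VSI list ID
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set using
 *		the update switch rule command
 */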
2047static int
2048ice_add_update_vsi_list(struct ice_hw *hw,
2049 struct ice_fltr_mgmt_list_entry *m_entry,
2050 struct ice_fltr_info *cur_fltr,
2051 struct ice_fltr_info *new_fltr)
2052{
2053 u16 vsi_list_id = 0;
2054 int status = 0;
2055
2056 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2057 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2058 return -EOPNOTSUPP;
2059
2060 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2061 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2062 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2063 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2064 return -EOPNOTSUPP;
2065
2066 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2067
2068
2069
2070
2071 struct ice_fltr_info tmp_fltr;
2072 u16 vsi_handle_arr[2];
2073
2074
2075 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2076 return -EEXIST;
2077
2078 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2079 vsi_handle_arr[1] = new_fltr->vsi_handle;
2080 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2081 &vsi_list_id,
2082 new_fltr->lkup_type);
2083 if (status)
2084 return status;
2085
2086 tmp_fltr = *new_fltr;
2087 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2088 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2089 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2090
2091
2092
2093 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
2094 if (status)
2095 return status;
2096
2097 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2098 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2099 m_entry->vsi_list_info =
2100 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2101 vsi_list_id);
2102
2103 if (!m_entry->vsi_list_info)
2104 return -ENOMEM;
2105
2106
2107
2108
2109 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2110 status =
2111 ice_add_marker_act(hw, m_entry,
2112 m_entry->sw_marker_id,
2113 m_entry->lg_act_idx);
2114 } else {
2115 u16 vsi_handle = new_fltr->vsi_handle;
2116 enum ice_adminq_opc opcode;
2117
2118 if (!m_entry->vsi_list_info)
2119 return -EIO;
2120
2121
2122 if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
2123 return 0;
2124
2125
2126
2127
2128 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2129 opcode = ice_aqc_opc_update_sw_rules;
2130
2131 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2132 vsi_list_id, false, opcode,
2133 new_fltr->lkup_type);
2134
2135 if (!status)
2136 set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
2137 }
2138 if (!status)
2139 m_entry->vsi_count++;
2140 return status;
2141}
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
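/**
 * ice_find_rule_entry - Search a rule entry
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type for which the specified rule needs to be searched
 * @f_info: rule information
 *
 * Helper function to search for a given rule entry
 * Returns pointer to entry storing the rule if found
 */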
2152static struct ice_fltr_mgmt_list_entry *
2153ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
2154{
2155 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2156 struct ice_switch_info *sw = hw->switch_info;
2157 struct list_head *list_head;
2158
2159 list_head = &sw->recp_list[recp_id].filt_rules;
2160 list_for_each_entry(list_itr, list_head, list_entry) {
2161 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2162 sizeof(f_info->l_data)) &&
2163 f_info->flag == list_itr->fltr_info.flag) {
2164 ret = list_itr;
2165 break;
2166 }
2167 }
2168 return ret;
2169}
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
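/**
 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type for which VSI lists need to be searched
 * @vsi_handle: VSI handle to be found in VSI list
 * @vsi_list_id: VSI list ID found containing vsi_handle
 *
 * Helper function to search a VSI list with a single entry containing the
 * given VSI handle element. Returns pointer to the VSI list entry if found.
 */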
2182static struct ice_vsi_list_map_info *
2183ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
2184 u16 *vsi_list_id)
2185{
2186 struct ice_vsi_list_map_info *map_info = NULL;
2187 struct ice_switch_info *sw = hw->switch_info;
2188 struct ice_fltr_mgmt_list_entry *list_itr;
2189 struct list_head *list_head;
2190
2191 list_head = &sw->recp_list[recp_id].filt_rules;
2192 list_for_each_entry(list_itr, list_head, list_entry) {
2193 if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
2194 map_info = list_itr->vsi_list_info;
2195 if (test_bit(vsi_handle, map_info->vsi_map)) {
2196 *vsi_list_id = map_info->vsi_list_id;
2197 return map_info;
2198 }
2199 }
2200 }
2201 return NULL;
2202}
2203
2204
2205
2206
2207
2208
2209
2210
2211
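/**
 * ice_add_rule_internal - add rule for a given lookup type
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type (recipe ID) for which rule has to be added
 * @f_entry: structure containing MAC forwarding information
 *
 * Adds or updates the rule lists for a given recipe
 */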
2212static int
2213ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
2214 struct ice_fltr_list_entry *f_entry)
2215{
2216 struct ice_switch_info *sw = hw->switch_info;
2217 struct ice_fltr_info *new_fltr, *cur_fltr;
2218 struct ice_fltr_mgmt_list_entry *m_entry;
2219 struct mutex *rule_lock;
2220 int status = 0;
2221
2222 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2223 return -EINVAL;
2224 f_entry->fltr_info.fwd_id.hw_vsi_id =
2225 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2226
2227 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2228
2229 mutex_lock(rule_lock);
2230 new_fltr = &f_entry->fltr_info;
2231 if (new_fltr->flag & ICE_FLTR_RX)
2232 new_fltr->src = hw->port_info->lport;
2233 else if (new_fltr->flag & ICE_FLTR_TX)
2234 new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;
2235
2236 m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
2237 if (!m_entry) {
2238 mutex_unlock(rule_lock);
2239 return ice_create_pkt_fwd_rule(hw, f_entry);
2240 }
2241
2242 cur_fltr = &m_entry->fltr_info;
2243 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2244 mutex_unlock(rule_lock);
2245
2246 return status;
2247}
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
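/**
 * ice_remove_vsi_list_rule
 * @hw: pointer to the hardware structure
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 * @lkup_type: switch rule filter lookup type
 *
 * The VSI list should be emptied before this function is called to remove
 * the VSI list.
 */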
2258static int
2259ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2260 enum ice_sw_lkup_type lkup_type)
2261{
2262 struct ice_aqc_sw_rules_elem *s_rule;
2263 u16 s_rule_size;
2264 int status;
2265
2266 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2267 s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
2268 if (!s_rule)
2269 return -ENOMEM;
2270
2271 s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2272 s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
2273
2274
2275
2276
2277 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2278 ice_aqc_opc_free_res);
2279
2280 devm_kfree(ice_hw_to_dev(hw), s_rule);
2281 return status;
2282}
2283
2284
2285
2286
2287
2288
2289
2290
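/**
 * ice_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *           be done
 */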
2291static int
2292ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2293 struct ice_fltr_mgmt_list_entry *fm_list)
2294{
2295 enum ice_sw_lkup_type lkup_type;
2296 u16 vsi_list_id;
2297 int status = 0;
2298
2299 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2300 fm_list->vsi_count == 0)
2301 return -EINVAL;
2302
2303
2304 if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
2305 return -ENOENT;
2306
2307 lkup_type = fm_list->fltr_info.lkup_type;
2308 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
2309 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2310 ice_aqc_opc_update_sw_rules,
2311 lkup_type);
2312 if (status)
2313 return status;
2314
2315 fm_list->vsi_count--;
2316 clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
2317
2318 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2319 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2320 struct ice_vsi_list_map_info *vsi_list_info =
2321 fm_list->vsi_list_info;
2322 u16 rem_vsi_handle;
2323
2324 rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
2325 ICE_MAX_VSI);
2326 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2327 return -EIO;
2328
2329
2330 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2331 vsi_list_id, true,
2332 ice_aqc_opc_update_sw_rules,
2333 lkup_type);
2334 if (status)
2335 return status;
2336
2337 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2338 tmp_fltr_info.fwd_id.hw_vsi_id =
2339 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2340 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2341 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2342 if (status) {
2343 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2344 tmp_fltr_info.fwd_id.hw_vsi_id, status);
2345 return status;
2346 }
2347
2348 fm_list->fltr_info = tmp_fltr_info;
2349 }
2350
2351 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2352 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2353 struct ice_vsi_list_map_info *vsi_list_info =
2354 fm_list->vsi_list_info;
2355
2356
2357 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2358 if (status) {
2359 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
2360 vsi_list_id, status);
2361 return status;
2362 }
2363
2364 list_del(&vsi_list_info->list_entry);
2365 devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
2366 fm_list->vsi_list_info = NULL;
2367 }
2368
2369 return status;
2370}
2371
2372
2373
2374
2375
2376
2377
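/**
 * ice_remove_rule_internal - Remove a filter rule of a given type
 * @hw: pointer to the hardware structure
 * @recp_id: recipe ID for which the rule needs to be removed
 * @f_entry: rule entry containing filter information
 */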
2378static int
2379ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
2380 struct ice_fltr_list_entry *f_entry)
2381{
2382 struct ice_switch_info *sw = hw->switch_info;
2383 struct ice_fltr_mgmt_list_entry *list_elem;
2384 struct mutex *rule_lock;
2385 bool remove_rule = false;
2386 u16 vsi_handle;
2387 int status = 0;
2388
2389 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2390 return -EINVAL;
2391 f_entry->fltr_info.fwd_id.hw_vsi_id =
2392 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2393
2394 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2395 mutex_lock(rule_lock);
2396 list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
2397 if (!list_elem) {
2398 status = -ENOENT;
2399 goto exit;
2400 }
2401
2402 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2403 remove_rule = true;
2404 } else if (!list_elem->vsi_list_info) {
2405 status = -ENOENT;
2406 goto exit;
2407 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2408
2409
2410
2411
2412
2413 list_elem->vsi_list_info->ref_cnt--;
2414 remove_rule = true;
2415 } else {
2416
2417
2418
2419
2420
2421 vsi_handle = f_entry->fltr_info.vsi_handle;
2422 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2423 if (status)
2424 goto exit;
2425
2426 if (list_elem->vsi_count == 0)
2427 remove_rule = true;
2428 }
2429
2430 if (remove_rule) {
2431
2432 struct ice_aqc_sw_rules_elem *s_rule;
2433
2434 s_rule = devm_kzalloc(ice_hw_to_dev(hw),
2435 ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
2436 GFP_KERNEL);
2437 if (!s_rule) {
2438 status = -ENOMEM;
2439 goto exit;
2440 }
2441
2442 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
2443 ice_aqc_opc_remove_sw_rules);
2444
2445 status = ice_aq_sw_rules(hw, s_rule,
2446 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
2447 ice_aqc_opc_remove_sw_rules, NULL);
2448
2449
2450 devm_kfree(ice_hw_to_dev(hw), s_rule);
2451
2452 if (status)
2453 goto exit;
2454
2455 list_del(&list_elem->list_entry);
2456 devm_kfree(ice_hw_to_dev(hw), list_elem);
2457 }
2458exit:
2459 mutex_unlock(rule_lock);
2460 return status;
2461}
2462
2463
2464
2465
2466
2467
2468
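/**
 * ice_mac_fltr_exist - does this MAC filter exist for given VSI
 * @hw: pointer to the hardware structure
 * @mac: MAC address to be checked (for MAC filter)
 * @vsi_handle: check MAC filter for this VSI
 */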
2469bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle)
2470{
2471 struct ice_fltr_mgmt_list_entry *entry;
2472 struct list_head *rule_head;
2473 struct ice_switch_info *sw;
2474 struct mutex *rule_lock;
2475 u16 hw_vsi_id;
2476
2477 if (!ice_is_vsi_valid(hw, vsi_handle))
2478 return false;
2479
2480 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
2481 sw = hw->switch_info;
2482 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2483 if (!rule_head)
2484 return false;
2485
2486 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2487 mutex_lock(rule_lock);
2488 list_for_each_entry(entry, rule_head, list_entry) {
2489 struct ice_fltr_info *f_info = &entry->fltr_info;
2490 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
2491
2492 if (is_zero_ether_addr(mac_addr))
2493 continue;
2494
2495 if (f_info->flag != ICE_FLTR_TX ||
2496 f_info->src_id != ICE_SRC_ID_VSI ||
2497 f_info->lkup_type != ICE_SW_LKUP_MAC ||
2498 f_info->fltr_act != ICE_FWD_TO_VSI ||
2499 hw_vsi_id != f_info->fwd_id.hw_vsi_id)
2500 continue;
2501
2502 if (ether_addr_equal(mac, mac_addr)) {
2503 mutex_unlock(rule_lock);
2504 return true;
2505 }
2506 }
2507 mutex_unlock(rule_lock);
2508 return false;
2509}
2510
2511
2512
2513
2514
2515
2516
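/**
 * ice_vlan_fltr_exist - does this VLAN filter exist for given VSI
 * @hw: pointer to the hardware structure
 * @vlan_id: VLAN ID to check for
 * @vsi_handle: check VLAN filter for this VSI
 */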
2517bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
2518{
2519 struct ice_fltr_mgmt_list_entry *entry;
2520 struct list_head *rule_head;
2521 struct ice_switch_info *sw;
2522 struct mutex *rule_lock;
2523 u16 hw_vsi_id;
2524
2525 if (vlan_id > ICE_MAX_VLAN_ID)
2526 return false;
2527
2528 if (!ice_is_vsi_valid(hw, vsi_handle))
2529 return false;
2530
2531 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
2532 sw = hw->switch_info;
2533 rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
2534 if (!rule_head)
2535 return false;
2536
2537 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
2538 mutex_lock(rule_lock);
2539 list_for_each_entry(entry, rule_head, list_entry) {
2540 struct ice_fltr_info *f_info = &entry->fltr_info;
2541 u16 entry_vlan_id = f_info->l_data.vlan.vlan_id;
2542 struct ice_vsi_list_map_info *map_info;
2543
2544 if (entry_vlan_id > ICE_MAX_VLAN_ID)
2545 continue;
2546
2547 if (f_info->flag != ICE_FLTR_TX ||
2548 f_info->src_id != ICE_SRC_ID_VSI ||
2549 f_info->lkup_type != ICE_SW_LKUP_VLAN)
2550 continue;
2551
2552
2553 if (f_info->fltr_act != ICE_FWD_TO_VSI &&
2554 f_info->fltr_act != ICE_FWD_TO_VSI_LIST)
2555 continue;
2556
2557 if (f_info->fltr_act == ICE_FWD_TO_VSI) {
2558 if (hw_vsi_id != f_info->fwd_id.hw_vsi_id)
2559 continue;
2560 } else if (f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2561
2562
2563
2564 if (entry->vsi_count == 1 &&
2565 entry->vsi_list_info) {
2566 map_info = entry->vsi_list_info;
2567 if (!test_bit(vsi_handle, map_info->vsi_map))
2568 continue;
2569 }
2570 }
2571
2572 if (vlan_id == entry_vlan_id) {
2573 mutex_unlock(rule_lock);
2574 return true;
2575 }
2576 }
2577 mutex_unlock(rule_lock);
2578
2579 return false;
2580}
2581
2582
2583
2584
2585
2586
2587
2588
2589
2590
2591
2592
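/**
 * ice_add_mac - Add a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * Unicast addresses (when unicast filters are not shared, i.e.
 * !hw->ucast_shared) are batched into a single add-switch-rules AQ request;
 * multicast and shared-unicast addresses are added one at a time through
 * ice_add_rule_internal().
 */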
2593int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
2594{
2595 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
2596 struct ice_fltr_list_entry *m_list_itr;
2597 struct list_head *rule_head;
2598 u16 total_elem_left, s_rule_size;
2599 struct ice_switch_info *sw;
2600 struct mutex *rule_lock;
2601 u16 num_unicast = 0;
2602 int status = 0;
2603 u8 elem_sent;
2604
2605 if (!m_list || !hw)
2606 return -EINVAL;
2607
2608 s_rule = NULL;
2609 sw = hw->switch_info;
2610 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2611 list_for_each_entry(m_list_itr, m_list, list_entry) {
2612 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
2613 u16 vsi_handle;
2614 u16 hw_vsi_id;
2615
2616 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
2617 vsi_handle = m_list_itr->fltr_info.vsi_handle;
2618 if (!ice_is_vsi_valid(hw, vsi_handle))
2619 return -EINVAL;
2620 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
2621 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
2622
2623 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
2624 return -EINVAL;
2625 m_list_itr->fltr_info.src = hw_vsi_id;
2626 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
2627 is_zero_ether_addr(add))
2628 return -EINVAL;
2629 if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
2630
2631 mutex_lock(rule_lock);
2632 if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
2633 &m_list_itr->fltr_info)) {
2634 mutex_unlock(rule_lock);
2635 return -EEXIST;
2636 }
2637 mutex_unlock(rule_lock);
2638 num_unicast++;
2639 } else if (is_multicast_ether_addr(add) ||
2640 (is_unicast_ether_addr(add) && hw->ucast_shared)) {
2641 m_list_itr->status =
2642 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
2643 m_list_itr);
2644 if (m_list_itr->status)
2645 return m_list_itr->status;
2646 }
2647 }
2648
2649 mutex_lock(rule_lock);
2650
2651 if (!num_unicast) {
2652 status = 0;
2653 goto ice_add_mac_exit;
2654 }
2655
2656 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2657
2658
2659 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2660 s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
2661 GFP_KERNEL);
2662 if (!s_rule) {
2663 status = -ENOMEM;
2664 goto ice_add_mac_exit;
2665 }
2666
2667 r_iter = s_rule;
2668 list_for_each_entry(m_list_itr, m_list, list_entry) {
2669 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
2670 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
2671
2672 if (is_unicast_ether_addr(mac_addr)) {
2673 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
2674 ice_aqc_opc_add_sw_rules);
2675 r_iter = (struct ice_aqc_sw_rules_elem *)
2676 ((u8 *)r_iter + s_rule_size);
2677 }
2678 }
2679
2680
2681 r_iter = s_rule;
2682
2683 for (total_elem_left = num_unicast; total_elem_left > 0;
2684 total_elem_left -= elem_sent) {
2685 struct ice_aqc_sw_rules_elem *entry = r_iter;
2686
2687 elem_sent = min_t(u8, total_elem_left,
2688 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
2689 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
2690 elem_sent, ice_aqc_opc_add_sw_rules,
2691 NULL);
2692 if (status)
2693 goto ice_add_mac_exit;
2694 r_iter = (struct ice_aqc_sw_rules_elem *)
2695 ((u8 *)r_iter + (elem_sent * s_rule_size));
2696 }
2697
2698
2699 r_iter = s_rule;
2700 list_for_each_entry(m_list_itr, m_list, list_entry) {
2701 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
2702 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
2703 struct ice_fltr_mgmt_list_entry *fm_entry;
2704
2705 if (is_unicast_ether_addr(mac_addr)) {
2706 f_info->fltr_rule_id =
2707 le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
2708 f_info->fltr_act = ICE_FWD_TO_VSI;
2709
2710 fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
2711 sizeof(*fm_entry), GFP_KERNEL);
2712 if (!fm_entry) {
2713 status = -ENOMEM;
2714 goto ice_add_mac_exit;
2715 }
2716 fm_entry->fltr_info = *f_info;
2717 fm_entry->vsi_count = 1;
2718
2719
2720
2721
2722 list_add(&fm_entry->list_entry, rule_head);
2723 r_iter = (struct ice_aqc_sw_rules_elem *)
2724 ((u8 *)r_iter + s_rule_size);
2725 }
2726 }
2727
2728ice_add_mac_exit:
2729 mutex_unlock(rule_lock);
2730 if (s_rule)
2731 devm_kfree(ice_hw_to_dev(hw), s_rule);
2732 return status;
2733}
2734
2735
2736
2737
2738
2739
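/**
 * ice_add_vlan_internal - Add one VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @f_entry: filter entry containing one VLAN information
 */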
2740static int
2741ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
2742{
2743 struct ice_switch_info *sw = hw->switch_info;
2744 struct ice_fltr_mgmt_list_entry *v_list_itr;
2745 struct ice_fltr_info *new_fltr, *cur_fltr;
2746 enum ice_sw_lkup_type lkup_type;
2747 u16 vsi_list_id = 0, vsi_handle;
2748 struct mutex *rule_lock;
2749 int status = 0;
2750
2751 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2752 return -EINVAL;
2753
2754 f_entry->fltr_info.fwd_id.hw_vsi_id =
2755 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2756 new_fltr = &f_entry->fltr_info;
2757
2758
2759 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
2760 return -EINVAL;
2761
2762 if (new_fltr->src_id != ICE_SRC_ID_VSI)
2763 return -EINVAL;
2764
2765 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
2766 lkup_type = new_fltr->lkup_type;
2767 vsi_handle = new_fltr->vsi_handle;
2768 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
2769 mutex_lock(rule_lock);
2770 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
2771 if (!v_list_itr) {
2772 struct ice_vsi_list_map_info *map_info = NULL;
2773
2774 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
2775
2776
2777
2778
2779
2780 map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
2781 vsi_handle,
2782 &vsi_list_id);
2783 if (!map_info) {
2784 status = ice_create_vsi_list_rule(hw,
2785 &vsi_handle,
2786 1,
2787 &vsi_list_id,
2788 lkup_type);
2789 if (status)
2790 goto exit;
2791 }
2792
2793 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2794 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
2795 }
2796
2797 status = ice_create_pkt_fwd_rule(hw, f_entry);
2798 if (!status) {
2799 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
2800 new_fltr);
2801 if (!v_list_itr) {
2802 status = -ENOENT;
2803 goto exit;
2804 }
2805
2806 if (map_info) {
2807 v_list_itr->vsi_list_info = map_info;
2808 map_info->ref_cnt++;
2809 } else {
2810 v_list_itr->vsi_list_info =
2811 ice_create_vsi_list_map(hw, &vsi_handle,
2812 1, vsi_list_id);
2813 }
2814 }
2815 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
2816
2817
2818
2819 cur_fltr = &v_list_itr->fltr_info;
2820 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
2821 new_fltr);
2822 } else {
2823
2824
2825
2826
2827
2828 struct ice_fltr_info tmp_fltr;
2829 u16 vsi_handle_arr[2];
2830 u16 cur_handle;
2831
2832
2833
2834
2835 if (v_list_itr->vsi_count > 1 &&
2836 v_list_itr->vsi_list_info->ref_cnt > 1) {
2837 ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
2838 status = -EIO;
2839 goto exit;
2840 }
2841
2842 cur_handle =
2843 find_first_bit(v_list_itr->vsi_list_info->vsi_map,
2844 ICE_MAX_VSI);
2845
2846
2847 if (cur_handle == vsi_handle) {
2848 status = -EEXIST;
2849 goto exit;
2850 }
2851
2852 vsi_handle_arr[0] = cur_handle;
2853 vsi_handle_arr[1] = vsi_handle;
2854 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2855 &vsi_list_id, lkup_type);
2856 if (status)
2857 goto exit;
2858
2859 tmp_fltr = v_list_itr->fltr_info;
2860 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
2861 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2862 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2863
2864
2865
2866 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
2867 if (status)
2868 goto exit;
2869
2870
2871
2872
2873 v_list_itr->vsi_list_info->ref_cnt--;
2874
2875
2876 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
2877 v_list_itr->vsi_list_info =
2878 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2879 vsi_list_id);
2880 v_list_itr->vsi_count++;
2881 }
2882
2883exit:
2884 mutex_unlock(rule_lock);
2885 return status;
2886}
2887
2888
2889
2890
2891
2892
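/**
 * ice_add_vlan - Add VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @v_list: list of VLAN entries and forwarding information
 */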
2893int ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
2894{
2895 struct ice_fltr_list_entry *v_list_itr;
2896
2897 if (!v_list || !hw)
2898 return -EINVAL;
2899
2900 list_for_each_entry(v_list_itr, v_list, list_entry) {
2901 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
2902 return -EINVAL;
2903 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
2904 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
2905 if (v_list_itr->status)
2906 return v_list_itr->status;
2907 }
2908 return 0;
2909}
2910
2911
2912
2913
2914
2915
2916
2917
2918
2919
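/**
 * ice_add_eth_mac - Add ethertype and MAC based filter rule
 * @hw: pointer to the hardware structure
 * @em_list: list of ethertype or ethertype MAC entries
 */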
2920int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
2921{
2922 struct ice_fltr_list_entry *em_list_itr;
2923
2924 if (!em_list || !hw)
2925 return -EINVAL;
2926
2927 list_for_each_entry(em_list_itr, em_list, list_entry) {
2928 enum ice_sw_lkup_type l_type =
2929 em_list_itr->fltr_info.lkup_type;
2930
2931 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
2932 l_type != ICE_SW_LKUP_ETHERTYPE)
2933 return -EINVAL;
2934
2935 em_list_itr->status = ice_add_rule_internal(hw, l_type,
2936 em_list_itr);
2937 if (em_list_itr->status)
2938 return em_list_itr->status;
2939 }
2940 return 0;
2941}
2942
2943
2944
2945
2946
2947
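/**
 * ice_remove_eth_mac - remove an ethertype (or MAC) based filter rule
 * @hw: pointer to the hardware structure
 * @em_list: list of ethertype or ethertype MAC entries
 */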
2948int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
2949{
2950 struct ice_fltr_list_entry *em_list_itr, *tmp;
2951
2952 if (!em_list || !hw)
2953 return -EINVAL;
2954
2955 list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
2956 enum ice_sw_lkup_type l_type =
2957 em_list_itr->fltr_info.lkup_type;
2958
2959 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
2960 l_type != ICE_SW_LKUP_ETHERTYPE)
2961 return -EINVAL;
2962
2963 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
2964 em_list_itr);
2965 if (em_list_itr->status)
2966 return em_list_itr->status;
2967 }
2968 return 0;
2969}
2970
2971
2972
2973
2974
2975
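/**
 * ice_rem_sw_rule_info - free the switch rule management list entries
 * @hw: pointer to the hardware structure
 * @rule_head: pointer to the switch rule list to be freed
 */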
2976static void
2977ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
2978{
2979 if (!list_empty(rule_head)) {
2980 struct ice_fltr_mgmt_list_entry *entry;
2981 struct ice_fltr_mgmt_list_entry *tmp;
2982
2983 list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
2984 list_del(&entry->list_entry);
2985 devm_kfree(ice_hw_to_dev(hw), entry);
2986 }
2987 }
2988}
2989
2990
2991
2992
2993
2994
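/**
 * ice_rem_adv_rule_info - free the advanced rule management list entries
 * @hw: pointer to the hardware structure
 * @rule_head: pointer to the advanced rule list to be freed
 */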
2995static void
2996ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
2997{
2998 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
2999 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3000
3001 if (list_empty(rule_head))
3002 return;
3003
3004 list_for_each_entry_safe(lst_itr, tmp_entry, rule_head, list_entry) {
3005 list_del(&lst_itr->list_entry);
3006 devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
3007 devm_kfree(ice_hw_to_dev(hw), lst_itr);
3008 }
3009}
3010
3011
3012
3013
3014
3015
3016
3017
3018
3019
3020
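/**
 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to set as default
 * @set: true to add the switch rule, false to remove it
 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
 *
 * Add or remove the switch rule that makes the given VSI the default VSI
 * of the port for the given direction.
 */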
3021int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
3022{
3023 struct ice_aqc_sw_rules_elem *s_rule;
3024 struct ice_fltr_info f_info;
3025 enum ice_adminq_opc opcode;
3026 u16 s_rule_size;
3027 u16 hw_vsi_id;
3028 int status;
3029
3030 if (!ice_is_vsi_valid(hw, vsi_handle))
3031 return -EINVAL;
3032 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3033
3034 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3035 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3036
3037 s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
3038 if (!s_rule)
3039 return -ENOMEM;
3040
3041 memset(&f_info, 0, sizeof(f_info));
3042
3043 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3044 f_info.flag = direction;
3045 f_info.fltr_act = ICE_FWD_TO_VSI;
3046 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3047
3048 if (f_info.flag & ICE_FLTR_RX) {
3049 f_info.src = hw->port_info->lport;
3050 f_info.src_id = ICE_SRC_ID_LPORT;
3051 if (!set)
3052 f_info.fltr_rule_id =
3053 hw->port_info->dflt_rx_vsi_rule_id;
3054 } else if (f_info.flag & ICE_FLTR_TX) {
3055 f_info.src_id = ICE_SRC_ID_VSI;
3056 f_info.src = hw_vsi_id;
3057 if (!set)
3058 f_info.fltr_rule_id =
3059 hw->port_info->dflt_tx_vsi_rule_id;
3060 }
3061
3062 if (set)
3063 opcode = ice_aqc_opc_add_sw_rules;
3064 else
3065 opcode = ice_aqc_opc_remove_sw_rules;
3066
3067 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3068
3069 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3070 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
3071 goto out;
3072 if (set) {
3073 u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
3074
3075 if (f_info.flag & ICE_FLTR_TX) {
3076 hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
3077 hw->port_info->dflt_tx_vsi_rule_id = index;
3078 } else if (f_info.flag & ICE_FLTR_RX) {
3079 hw->port_info->dflt_rx_vsi_num = hw_vsi_id;
3080 hw->port_info->dflt_rx_vsi_rule_id = index;
3081 }
3082 } else {
3083 if (f_info.flag & ICE_FLTR_TX) {
3084 hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3085 hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3086 } else if (f_info.flag & ICE_FLTR_RX) {
3087 hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3088 hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3089 }
3090 }
3091
3092out:
3093 devm_kfree(ice_hw_to_dev(hw), s_rule);
3094 return status;
3095}
3096
3097
3108
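/**
 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type for which the specified rule needs to be searched
 * @f_info: rule information
 *
 * Helper function to search for a unicast rule entry - this is to be used
 * to remove a unicast MAC filter that is not shared with other VSIs on the
 * PF switch. Returns a pointer to the entry storing the rule if found.
 */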
3109static struct ice_fltr_mgmt_list_entry *
3110ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
3111 struct ice_fltr_info *f_info)
3112{
3113 struct ice_switch_info *sw = hw->switch_info;
3114 struct ice_fltr_mgmt_list_entry *list_itr;
3115 struct list_head *list_head;
3116
3117 list_head = &sw->recp_list[recp_id].filt_rules;
3118 list_for_each_entry(list_itr, list_head, list_entry) {
3119 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3120 sizeof(f_info->l_data)) &&
3121 f_info->fwd_id.hw_vsi_id ==
3122 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3123 f_info->flag == list_itr->fltr_info.flag)
3124 return list_itr;
3125 }
3126 return NULL;
3127}
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141
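/**
 * ice_remove_mac - remove a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * This function removes either a MAC filter rule or a specific VSI from a
 * VSI list for a multicast MAC address. It stops at the first entry whose
 * removal fails and returns that entry's status.
 */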
3142int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
3143{
3144 struct ice_fltr_list_entry *list_itr, *tmp;
3145 struct mutex *rule_lock;
3146
3147 if (!m_list)
3148 return -EINVAL;
3149
3150 rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3151 list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
3152 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3153 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3154 u16 vsi_handle;
3155
3156 if (l_type != ICE_SW_LKUP_MAC)
3157 return -EINVAL;
3158
3159 vsi_handle = list_itr->fltr_info.vsi_handle;
3160 if (!ice_is_vsi_valid(hw, vsi_handle))
3161 return -EINVAL;
3162
3163 list_itr->fltr_info.fwd_id.hw_vsi_id =
3164 ice_get_hw_vsi_num(hw, vsi_handle);
3165 if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
3166
3167
3168
3169
3170 mutex_lock(rule_lock);
3171 if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
3172 &list_itr->fltr_info)) {
3173 mutex_unlock(rule_lock);
3174 return -ENOENT;
3175 }
3176 mutex_unlock(rule_lock);
3177 }
3178 list_itr->status = ice_remove_rule_internal(hw,
3179 ICE_SW_LKUP_MAC,
3180 list_itr);
3181 if (list_itr->status)
3182 return list_itr->status;
3183 }
3184 return 0;
3185}
3186
3187
3188
3189
3190
3191
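/**
 * ice_remove_vlan - remove a VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @v_list: list of VLAN entries and forwarding information
 */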
3192int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
3193{
3194 struct ice_fltr_list_entry *v_list_itr, *tmp;
3195
3196 if (!v_list || !hw)
3197 return -EINVAL;
3198
3199 list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
3200 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3201
3202 if (l_type != ICE_SW_LKUP_VLAN)
3203 return -EINVAL;
3204 v_list_itr->status = ice_remove_rule_internal(hw,
3205 ICE_SW_LKUP_VLAN,
3206 v_list_itr);
3207 if (v_list_itr->status)
3208 return v_list_itr->status;
3209 }
3210 return 0;
3211}
3212
3213
3214
3215
3216
3217
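/**
 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
 * @fm_entry: filter entry to inspect
 * @vsi_handle: VSI handle to compare with filter info
 */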
3218static bool
3219ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3220{
3221 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3222 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3223 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3224 fm_entry->vsi_list_info &&
3225 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
3226}
3227
3228
3229
3230
3231
3232
3233
3234
3235
3236
3237
3238
3239
3240
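/**
 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @vsi_list_head: pointer to the list to add entry to
 * @fi: pointer to fltr_info of filter entry to copy & add
 *
 * Helper function, used when creating a list of filters to remove from
 * a specific VSI. The entry added to vsi_list_head is a copy of the
 * original filter entry, except that fltr_act and fwd_id are rewritten so
 * later logic can tell which VSI to remove the filter from.
 */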
3241static int
3242ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3243 struct list_head *vsi_list_head,
3244 struct ice_fltr_info *fi)
3245{
3246 struct ice_fltr_list_entry *tmp;
3247
3248
3249
3250
3251 tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
3252 if (!tmp)
3253 return -ENOMEM;
3254
3255 tmp->fltr_info = *fi;
3256
3257
3258
3259
3260
3261
3262 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3263 tmp->fltr_info.vsi_handle = vsi_handle;
3264 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3265
3266 list_add(&tmp->list_entry, vsi_list_head);
3267
3268 return 0;
3269}
3270
3271
3272
3273
3274
3275
3276
3277
3278
3279
3280
3281
3282
3283
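/**
 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @lkup_list_head: pointer to the list that has certain lookup type filters
 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
 *
 * Locates all filters in lkup_list_head that are used by the given VSI
 * and adds copies of those entries to vsi_list_head.
 */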
3284static int
3285ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3286 struct list_head *lkup_list_head,
3287 struct list_head *vsi_list_head)
3288{
3289 struct ice_fltr_mgmt_list_entry *fm_entry;
3290 int status = 0;
3291
3292
3293 if (!ice_is_vsi_valid(hw, vsi_handle))
3294 return -EINVAL;
3295
3296 list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
3297 if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
3298 continue;
3299
3300 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3301 vsi_list_head,
3302 &fm_entry->fltr_info);
3303 if (status)
3304 return status;
3305 }
3306 return status;
3307}
3308
3309
3310
3311
3312
3313
3314
3315
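/**
 * ice_determine_promisc_mask - determine promisc mask
 * @fi: filter info to parse
 *
 * Helper function to determine which ICE_PROMISC_ mask corresponds to the
 * given filter info.
 */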
3316static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
3317{
3318 u16 vid = fi->l_data.mac_vlan.vlan_id;
3319 u8 *macaddr = fi->l_data.mac.mac_addr;
3320 bool is_tx_fltr = false;
3321 u8 promisc_mask = 0;
3322
3323 if (fi->flag == ICE_FLTR_TX)
3324 is_tx_fltr = true;
3325
3326 if (is_broadcast_ether_addr(macaddr))
3327 promisc_mask |= is_tx_fltr ?
3328 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
3329 else if (is_multicast_ether_addr(macaddr))
3330 promisc_mask |= is_tx_fltr ?
3331 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
3332 else if (is_unicast_ether_addr(macaddr))
3333 promisc_mask |= is_tx_fltr ?
3334 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
3335 if (vid)
3336 promisc_mask |= is_tx_fltr ?
3337 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
3338
3339 return promisc_mask;
3340}
3341
3342
3343
3344
3345
3346
3347
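/**
 * ice_remove_promisc - Remove promisc based filter rules
 * @hw: pointer to the hardware structure
 * @recp_id: recipe ID for which the rule needs to be removed
 * @v_list: list of promisc entries
 */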
3348static int
3349ice_remove_promisc(struct ice_hw *hw, u8 recp_id, struct list_head *v_list)
3350{
3351 struct ice_fltr_list_entry *v_list_itr, *tmp;
3352
3353 list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
3354 v_list_itr->status =
3355 ice_remove_rule_internal(hw, recp_id, v_list_itr);
3356 if (v_list_itr->status)
3357 return v_list_itr->status;
3358 }
3359 return 0;
3360}
3361
3362
3363
3364
3365
3366
3367
3368
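/**
 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to clear mode
 * @promisc_mask: mask of promiscuous config bits to clear
 * @vid: VLAN ID to clear VLAN promiscuous
 */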
3369int
3370ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3371 u16 vid)
3372{
3373 struct ice_switch_info *sw = hw->switch_info;
3374 struct ice_fltr_list_entry *fm_entry, *tmp;
3375 struct list_head remove_list_head;
3376 struct ice_fltr_mgmt_list_entry *itr;
3377 struct list_head *rule_head;
3378 struct mutex *rule_lock;
3379 int status = 0;
3380 u8 recipe_id;
3381
3382 if (!ice_is_vsi_valid(hw, vsi_handle))
3383 return -EINVAL;
3384
3385 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
3386 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3387 else
3388 recipe_id = ICE_SW_LKUP_PROMISC;
3389
3390 rule_head = &sw->recp_list[recipe_id].filt_rules;
3391 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
3392
3393 INIT_LIST_HEAD(&remove_list_head);
3394
3395 mutex_lock(rule_lock);
3396 list_for_each_entry(itr, rule_head, list_entry) {
3397 struct ice_fltr_info *fltr_info;
3398 u8 fltr_promisc_mask = 0;
3399
3400 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3401 continue;
3402 fltr_info = &itr->fltr_info;
3403
3404 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
3405 vid != fltr_info->l_data.mac_vlan.vlan_id)
3406 continue;
3407
3408 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
3409
3410
3411 if (fltr_promisc_mask & ~promisc_mask)
3412 continue;
3413
3414 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3415 &remove_list_head,
3416 fltr_info);
3417 if (status) {
3418 mutex_unlock(rule_lock);
3419 goto free_fltr_list;
3420 }
3421 }
3422 mutex_unlock(rule_lock);
3423
3424 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
3425
3426free_fltr_list:
3427 list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
3428 list_del(&fm_entry->list_entry);
3429 devm_kfree(ice_hw_to_dev(hw), fm_entry);
3430 }
3431
3432 return status;
3433}
3434
3435
3436
3437
3438
3439
3440
3441
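/**
 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @vid: VLAN ID to set VLAN promiscuous
 */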
3442int
3443ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
3444{
3445 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
3446 struct ice_fltr_list_entry f_list_entry;
3447 struct ice_fltr_info new_fltr;
3448 bool is_tx_fltr;
3449 int status = 0;
3450 u16 hw_vsi_id;
3451 int pkt_type;
3452 u8 recipe_id;
3453
3454 if (!ice_is_vsi_valid(hw, vsi_handle))
3455 return -EINVAL;
3456 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3457
3458 memset(&new_fltr, 0, sizeof(new_fltr));
3459
3460 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
3461 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
3462 new_fltr.l_data.mac_vlan.vlan_id = vid;
3463 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3464 } else {
3465 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
3466 recipe_id = ICE_SW_LKUP_PROMISC;
3467 }
3468
3469
3470
3471
3472
3473
3474 while (promisc_mask) {
3475 u8 *mac_addr;
3476
3477 pkt_type = 0;
3478 is_tx_fltr = false;
3479
3480 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
3481 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
3482 pkt_type = UCAST_FLTR;
3483 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
3484 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
3485 pkt_type = UCAST_FLTR;
3486 is_tx_fltr = true;
3487 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
3488 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
3489 pkt_type = MCAST_FLTR;
3490 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
3491 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
3492 pkt_type = MCAST_FLTR;
3493 is_tx_fltr = true;
3494 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
3495 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
3496 pkt_type = BCAST_FLTR;
3497 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
3498 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
3499 pkt_type = BCAST_FLTR;
3500 is_tx_fltr = true;
3501 }
3502
3503
3504 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
3505 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
3506 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
3507 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
3508 is_tx_fltr = true;
3509 }
3510
3511
3512 mac_addr = new_fltr.l_data.mac.mac_addr;
3513 if (pkt_type == BCAST_FLTR) {
3514 eth_broadcast_addr(mac_addr);
3515 } else if (pkt_type == MCAST_FLTR ||
3516 pkt_type == UCAST_FLTR) {
3517
3518 ether_addr_copy(mac_addr, dummy_eth_header);
3519 if (pkt_type == MCAST_FLTR)
3520 mac_addr[0] |= 0x1;
3521 }
3522
3523
3524 new_fltr.flag = 0;
3525 if (is_tx_fltr) {
3526 new_fltr.flag |= ICE_FLTR_TX;
3527 new_fltr.src = hw_vsi_id;
3528 } else {
3529 new_fltr.flag |= ICE_FLTR_RX;
3530 new_fltr.src = hw->port_info->lport;
3531 }
3532
3533 new_fltr.fltr_act = ICE_FWD_TO_VSI;
3534 new_fltr.vsi_handle = vsi_handle;
3535 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
3536 f_list_entry.fltr_info = new_fltr;
3537
3538 status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
3539 if (status)
3540 goto set_promisc_exit;
3541 }
3542
3543set_promisc_exit:
3544 return status;
3545}
3546
3547
3548
3549
3550
3551
3552
3553
3554
3555
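/**
 * ice_set_vlan_vsi_promisc - set promiscuous mode for all VLANs of a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @rm_vlan_promisc: clear (true) or set (false) VLAN promiscuous mode
 *
 * Configure the VSI, for all of its associated VLANs, to the given
 * promiscuous mode(s).
 */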
3556int
3557ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3558 bool rm_vlan_promisc)
3559{
3560 struct ice_switch_info *sw = hw->switch_info;
3561 struct ice_fltr_list_entry *list_itr, *tmp;
3562 struct list_head vsi_list_head;
3563 struct list_head *vlan_head;
3564 struct mutex *vlan_lock;
3565 u16 vlan_id;
3566 int status;
3567
3568 INIT_LIST_HEAD(&vsi_list_head);
3569 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3570 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
3571 mutex_lock(vlan_lock);
3572 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
3573 &vsi_list_head);
3574 mutex_unlock(vlan_lock);
3575 if (status)
3576 goto free_fltr_list;
3577
3578 list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
3579 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
3580 if (rm_vlan_promisc)
3581 status = ice_clear_vsi_promisc(hw, vsi_handle,
3582 promisc_mask, vlan_id);
3583 else
3584 status = ice_set_vsi_promisc(hw, vsi_handle,
3585 promisc_mask, vlan_id);
3586 if (status)
3587 break;
3588 }
3589
3590free_fltr_list:
3591 list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
3592 list_del(&list_itr->list_entry);
3593 devm_kfree(ice_hw_to_dev(hw), list_itr);
3594 }
3595 return status;
3596}
3597
3598
3599
3600
3601
3602
3603
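/**
 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @lkup: switch rule filter lookup type
 */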
3604static void
3605ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
3606 enum ice_sw_lkup_type lkup)
3607{
3608 struct ice_switch_info *sw = hw->switch_info;
3609 struct ice_fltr_list_entry *fm_entry;
3610 struct list_head remove_list_head;
3611 struct list_head *rule_head;
3612 struct ice_fltr_list_entry *tmp;
3613 struct mutex *rule_lock;
3614 int status;
3615
3616 INIT_LIST_HEAD(&remove_list_head);
3617 rule_lock = &sw->recp_list[lkup].filt_rule_lock;
3618 rule_head = &sw->recp_list[lkup].filt_rules;
3619 mutex_lock(rule_lock);
3620 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
3621 &remove_list_head);
3622 mutex_unlock(rule_lock);
3623 if (status)
3624 goto free_fltr_list;
3625
3626 switch (lkup) {
3627 case ICE_SW_LKUP_MAC:
3628 ice_remove_mac(hw, &remove_list_head);
3629 break;
3630 case ICE_SW_LKUP_VLAN:
3631 ice_remove_vlan(hw, &remove_list_head);
3632 break;
3633 case ICE_SW_LKUP_PROMISC:
3634 case ICE_SW_LKUP_PROMISC_VLAN:
3635 ice_remove_promisc(hw, lkup, &remove_list_head);
3636 break;
3637 case ICE_SW_LKUP_MAC_VLAN:
3638 case ICE_SW_LKUP_ETHERTYPE:
3639 case ICE_SW_LKUP_ETHERTYPE_MAC:
3640 case ICE_SW_LKUP_DFLT:
3641 case ICE_SW_LKUP_LAST:
3642 default:
3643 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
3644 break;
3645 }
3646
3647free_fltr_list:
3648 list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
3649 list_del(&fm_entry->list_entry);
3650 devm_kfree(ice_hw_to_dev(hw), fm_entry);
3651 }
3652}
3653
3654
3655
3656
3657
3658
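/**
 * ice_remove_vsi_fltr - Remove all filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 */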
3659void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
3660{
3661 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
3662 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
3663 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
3664 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
3665 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
3666 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
3667 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
3668 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
3669}
3670
3671
3672
3673
3674
3675
3676
3677
3678
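/**
 * ice_alloc_res_cntr - allocate a resource counter
 * @hw: pointer to the hardware structure
 * @type: type of resource
 * @alloc_shared: if set it is shared, else dedicated
 * @num_items: number of entries requested for the resource type
 * @counter_id: counter index returned by the AQ call
 */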
3679int
3680ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
3681 u16 *counter_id)
3682{
3683 struct ice_aqc_alloc_free_res_elem *buf;
3684 u16 buf_len;
3685 int status;
3686
3687
3688 buf_len = struct_size(buf, elem, 1);
3689 buf = kzalloc(buf_len, GFP_KERNEL);
3690 if (!buf)
3691 return -ENOMEM;
3692
3693 buf->num_elems = cpu_to_le16(num_items);
3694 buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
3695 ICE_AQC_RES_TYPE_M) | alloc_shared);
3696
3697 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
3698 ice_aqc_opc_alloc_res, NULL);
3699 if (status)
3700 goto exit;
3701
3702 *counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);
3703
3704exit:
3705 kfree(buf);
3706 return status;
3707}
3708
3709
3710
3711
3712
3713
3714
3715
3716
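/**
 * ice_free_res_cntr - free a resource counter
 * @hw: pointer to the hardware structure
 * @type: type of resource
 * @alloc_shared: if set it is shared, else dedicated
 * @num_items: number of entries to be freed for the resource type
 * @counter_id: counter ID of the resource which needs to be freed
 */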
3717int
3718ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
3719 u16 counter_id)
3720{
3721 struct ice_aqc_alloc_free_res_elem *buf;
3722 u16 buf_len;
3723 int status;
3724
3725
3726 buf_len = struct_size(buf, elem, 1);
3727 buf = kzalloc(buf_len, GFP_KERNEL);
3728 if (!buf)
3729 return -ENOMEM;
3730
3731 buf->num_elems = cpu_to_le16(num_items);
3732 buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
3733 ICE_AQC_RES_TYPE_M) | alloc_shared);
3734 buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);
3735
3736 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
3737 ice_aqc_opc_free_res, NULL);
3738 if (status)
3739 ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
3740
3741 kfree(buf);
3742 return status;
3743}
3744
3745
3746
3747
3748
3749
3750
3751
3752
3753
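/* This mapping table maps every word within a given protocol structure to
 * the real byte offset as per the specification of that protocol header
 * (for example, the destination MAC address occupies three words at byte
 * offsets 0, 2 and 4 of the Ethernet header).
 */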
3754static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
3755 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
3756 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
3757 { ICE_ETYPE_OL, { 0 } },
3758 { ICE_VLAN_OFOS, { 2, 0 } },
3759 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
3760 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
3761 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
3762 26, 28, 30, 32, 34, 36, 38 } },
3763 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
3764 26, 28, 30, 32, 34, 36, 38 } },
3765 { ICE_TCP_IL, { 0, 2 } },
3766 { ICE_UDP_OF, { 0, 2 } },
3767 { ICE_UDP_ILOS, { 0, 2 } },
3768 { ICE_VXLAN, { 8, 10, 12, 14 } },
3769 { ICE_GENEVE, { 8, 10, 12, 14 } },
3770 { ICE_NVGRE, { 0, 2, 4, 6 } },
3771};
3772
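/* map of software protocol types to hardware protocol IDs */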
3773static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
3774 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
3775 { ICE_MAC_IL, ICE_MAC_IL_HW },
3776 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
3777 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
3778 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
3779 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
3780 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
3781 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
3782 { ICE_TCP_IL, ICE_TCP_IL_HW },
3783 { ICE_UDP_OF, ICE_UDP_OF_HW },
3784 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
3785 { ICE_VXLAN, ICE_UDP_OF_HW },
3786 { ICE_GENEVE, ICE_UDP_OF_HW },
3787 { ICE_NVGRE, ICE_GRE_OF_HW },
3788};
3789
3790
3791
3792
3793
3794
3795
3796
3797
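/**
 * ice_find_recp - find an existing recipe
 * @hw: pointer to the hardware structure
 * @lkup_exts: extraction sequence to match
 * @tun_type: type of recipe tunnel
 *
 * Returns the index of the matching recipe, or ICE_MAX_NUM_RECIPES if no
 * match is found.
 */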
3798static u16
3799ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
3800 enum ice_sw_tunnel_type tun_type)
3801{
3802 bool refresh_required = true;
3803 struct ice_sw_recipe *recp;
3804 u8 i;
3805
3806
3807 recp = hw->switch_info->recp_list;
3808 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3809
3810
3811
3812
3813
3814 if (!recp[i].recp_created)
3815 if (ice_get_recp_frm_fw(hw,
3816 hw->switch_info->recp_list, i,
3817 &refresh_required))
3818 continue;
3819
3820
3821 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
3822 ICE_AQ_RECIPE_ACT_INV_ACT)
3823 continue;
3824
3825
3826 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
3827 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
3828 struct ice_fv_word *be = lkup_exts->fv_words;
3829 u16 *cr = recp[i].lkup_exts.field_mask;
3830 u16 *de = lkup_exts->field_mask;
3831 bool found = true;
3832 u8 pe, qr;
3833
3834
3835
3836
3837 for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
3838 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
3839 qr++) {
3840 if (ar[qr].off == be[pe].off &&
3841 ar[qr].prot_id == be[pe].prot_id &&
3842 cr[qr] == de[pe])
3843
3844
3845
3846 break;
3847 }
3848
3849
3850
3851
3852
3853
3854 if (qr >= recp[i].lkup_exts.n_val_words) {
3855 found = false;
3856 break;
3857 }
3858 }
3859
3860
3861
3862
3863 if (found && recp[i].tun_type == tun_type)
3864 return i;
3865 }
3866 }
3867 return ICE_MAX_NUM_RECIPES;
3868}
3869
3870
3871
3872
3873
3874
3875
3876
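/**
 * ice_prot_type_to_id - get protocol ID from protocol type
 * @type: protocol type
 * @id: pointer to variable that will receive the ID
 *
 * Returns true if found, false otherwise.
 */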
3877static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
3878{
3879 u8 i;
3880
3881 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
3882 if (ice_prot_id_tbl[i].type == type) {
3883 *id = ice_prot_id_tbl[i].protocol_id;
3884 return true;
3885 }
3886 return false;
3887}
3888
3889
3890
3891
3892
3893
3894
3895
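/**
 * ice_fill_valid_words - count valid words
 * @rule: advanced rule with lookup information
 * @lkup_exts: byte offset extractions of the words that are valid
 *
 * Calculate valid words in a lookup rule using its mask value and returns
 * the number of words added to lkup_exts.
 */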
3896static u8
3897ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
3898 struct ice_prot_lkup_ext *lkup_exts)
3899{
3900 u8 j, word, prot_id, ret_val;
3901
3902 if (!ice_prot_type_to_id(rule->type, &prot_id))
3903 return 0;
3904
3905 word = lkup_exts->n_val_words;
3906
3907 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
3908 if (((u16 *)&rule->m_u)[j] &&
3909 rule->type < ARRAY_SIZE(ice_prot_ext)) {
3910
3911 if (word >= ICE_MAX_CHAIN_WORDS)
3912 return 0;
3913 lkup_exts->fv_words[word].off =
3914 ice_prot_ext[rule->type].offs[j];
3915 lkup_exts->fv_words[word].prot_id =
3916 ice_prot_id_tbl[rule->type].protocol_id;
3917 lkup_exts->field_mask[word] =
3918 be16_to_cpu(((__force __be16 *)&rule->m_u)[j]);
3919 word++;
3920 }
3921
3922 ret_val = word - lkup_exts->n_val_words;
3923 lkup_exts->n_val_words = word;
3924
3925 return ret_val;
3926}
3927
3928
3929
3930
3931
3932
3933
3934
3935
3936
3937
3938
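/**
 * ice_create_first_fit_recp_def - Create a recipe grouping
 * @hw: pointer to the hardware structure
 * @lkup_exts: an array of protocol header extractions
 * @rg_list: pointer to a list that stores new recipe groups
 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
 *
 * Using a first-fit algorithm, take all the words that are still not done
 * and group them into groups of up to ICE_NUM_WORDS_RECIPE words. Each
 * group makes up one recipe.
 */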
3939static int
3940ice_create_first_fit_recp_def(struct ice_hw *hw,
3941 struct ice_prot_lkup_ext *lkup_exts,
3942 struct list_head *rg_list,
3943 u8 *recp_cnt)
3944{
3945 struct ice_pref_recipe_group *grp = NULL;
3946 u8 j;
3947
3948 *recp_cnt = 0;
3949
3950
3951
3952
3953 for (j = 0; j < lkup_exts->n_val_words; j++)
3954 if (!test_bit(j, lkup_exts->done)) {
3955 if (!grp ||
3956 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
3957 struct ice_recp_grp_entry *entry;
3958
3959 entry = devm_kzalloc(ice_hw_to_dev(hw),
3960 sizeof(*entry),
3961 GFP_KERNEL);
3962 if (!entry)
3963 return -ENOMEM;
3964 list_add(&entry->l_entry, rg_list);
3965 grp = &entry->r_group;
3966 (*recp_cnt)++;
3967 }
3968
3969 grp->pairs[grp->n_val_pairs].prot_id =
3970 lkup_exts->fv_words[j].prot_id;
3971 grp->pairs[grp->n_val_pairs].off =
3972 lkup_exts->fv_words[j].off;
3973 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
3974 grp->n_val_pairs++;
3975 }
3976
3977 return 0;
3978}
3979
3980
3981
3982
3983
3984
3985
3986
3987
3988
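/**
 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
 * @hw: pointer to the hardware structure
 * @fv_list: field vector list with the extraction sequence information
 * @rg_list: recipe groupings with protocol-offset pairs
 *
 * Helper function to fill in the field vector indices for the
 * protocol-offset pairs. These indexes are then ultimately programmed into
 * a recipe.
 */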
3989static int
3990ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
3991 struct list_head *rg_list)
3992{
3993 struct ice_sw_fv_list_entry *fv;
3994 struct ice_recp_grp_entry *rg;
3995 struct ice_fv_word *fv_ext;
3996
3997 if (list_empty(fv_list))
3998 return 0;
3999
4000 fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry,
4001 list_entry);
4002 fv_ext = fv->fv_ptr->ew;
4003
4004 list_for_each_entry(rg, rg_list, l_entry) {
4005 u8 i;
4006
4007 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4008 struct ice_fv_word *pr;
4009 bool found = false;
4010 u16 mask;
4011 u8 j;
4012
4013 pr = &rg->r_group.pairs[i];
4014 mask = rg->r_group.mask[i];
4015
4016 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4017 if (fv_ext[j].prot_id == pr->prot_id &&
4018 fv_ext[j].off == pr->off) {
4019 found = true;
4020
4021
4022 rg->fv_idx[i] = j;
4023 rg->fv_mask[i] = mask;
4024 break;
4025 }
4026
4027
4028
4029
4030 if (!found)
4031 return -EINVAL;
4032 }
4033 }
4034
4035 return 0;
4036}
4037
4063
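/**
 * ice_find_free_recp_res_idx - find free result indexes for a new recipe
 * @hw: pointer to hardware structure
 * @profiles: bitmap of profiles that will be associated with the new recipe
 * @free_idx: pointer to the bitmap that receives the free result indexes
 *
 * Build the set of result indexes that are possible for all profiles the
 * new recipe will be attached to, XOR out the indexes already used by the
 * recipes currently associated with those profiles, and return how many
 * free indexes remain.
 */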
4064static u16
4065ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
4066 unsigned long *free_idx)
4067{
4068 DECLARE_BITMAP(possible_idx, ICE_MAX_FV_WORDS);
4069 DECLARE_BITMAP(recipes, ICE_MAX_NUM_RECIPES);
4070 DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS);
4071 u16 bit;
4072
4073 bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
4074 bitmap_zero(used_idx, ICE_MAX_FV_WORDS);
4075
4076 bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
4077
4078
4079
4080
4081
4082
4083 for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
4084 bitmap_or(recipes, recipes, profile_to_recipe[bit],
4085 ICE_MAX_NUM_RECIPES);
4086 bitmap_and(possible_idx, possible_idx,
4087 hw->switch_info->prof_res_bm[bit],
4088 ICE_MAX_FV_WORDS);
4089 }
4090
4091
4092
4093
4094 for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
4095 bitmap_or(used_idx, used_idx,
4096 hw->switch_info->recp_list[bit].res_idxs,
4097 ICE_MAX_FV_WORDS);
4098
4099 bitmap_xor(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
4100
4101
4102 return (u16)bitmap_weight(free_idx, ICE_MAX_FV_WORDS);
4103}
4104
4105
4106
4107
4108
4109
4110
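/**
 * ice_add_sw_recipe - issue AQ calls to create a switch recipe
 * @hw: pointer to hardware structure
 * @rm: recipe management list entry
 * @profiles: bitmap of profiles that will be associated with the recipe
 */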
4111static int
4112ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
4113 unsigned long *profiles)
4114{
4115 DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
4116 struct ice_aqc_recipe_data_elem *tmp;
4117 struct ice_aqc_recipe_data_elem *buf;
4118 struct ice_recp_grp_entry *entry;
4119 u16 free_res_idx;
4120 u16 recipe_count;
4121 u8 chain_idx;
4122 u8 recps = 0;
4123 int status;
4124
4125
4126
4127
4128
4129
4130
4131 bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
4132 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
4133
4134 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
4135 free_res_idx, rm->n_grp_count);
4136
4137 if (rm->n_grp_count > 1) {
4138 if (rm->n_grp_count > free_res_idx)
4139 return -ENOSPC;
4140
4141 rm->n_grp_count++;
4142 }
4143
4144 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
4145 return -ENOSPC;
4146
4147 tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
4148 if (!tmp)
4149 return -ENOMEM;
4150
4151 buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
4152 GFP_KERNEL);
4153 if (!buf) {
4154 status = -ENOMEM;
4155 goto err_mem;
4156 }
4157
4158 bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
4159 recipe_count = ICE_MAX_NUM_RECIPES;
4160 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
4161 NULL);
4162 if (status || recipe_count == 0)
4163 goto err_unroll;
4164
4165
4166
4167
4168 chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
4169 list_for_each_entry(entry, &rm->rg_list, l_entry) {
4170 u8 i;
4171
4172 status = ice_alloc_recipe(hw, &entry->rid);
4173 if (status)
4174 goto err_unroll;
4175
4176
4177
4178
4179 tmp[0].content.result_indx = 0;
4180
4181 buf[recps] = tmp[0];
4182 buf[recps].recipe_indx = (u8)entry->rid;
4183
4184
4185
4186 buf[recps].content.rid = 0;
4187 memset(&buf[recps].content.lkup_indx, 0,
4188 sizeof(buf[recps].content.lkup_indx));
4189
4190
4191 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
4192 buf[recps].content.mask[0] =
4193 cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
4194
4195
4196
4197 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
4198 buf[recps].content.lkup_indx[i] = 0x80;
4199 buf[recps].content.mask[i] = 0;
4200 }
4201
4202 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
4203 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
4204 buf[recps].content.mask[i + 1] =
4205 cpu_to_le16(entry->fv_mask[i]);
4206 }
4207
4208 if (rm->n_grp_count > 1) {
4209
4210
4211
4212 if (chain_idx >= ICE_MAX_FV_WORDS) {
4213 ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
4214 status = -ENOSPC;
4215 goto err_unroll;
4216 }
4217
4218 entry->chain_idx = chain_idx;
4219 buf[recps].content.result_indx =
4220 ICE_AQ_RECIPE_RESULT_EN |
4221 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
4222 ICE_AQ_RECIPE_RESULT_DATA_M);
4223 clear_bit(chain_idx, result_idx_bm);
4224 chain_idx = find_first_bit(result_idx_bm,
4225 ICE_MAX_FV_WORDS);
4226 }
4227
4228
4229 bitmap_zero((unsigned long *)buf[recps].recipe_bitmap,
4230 ICE_MAX_NUM_RECIPES);
4231 set_bit(buf[recps].recipe_indx,
4232 (unsigned long *)buf[recps].recipe_bitmap);
4233 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
4234 recps++;
4235 }
4236
4237 if (rm->n_grp_count == 1) {
4238 rm->root_rid = buf[0].recipe_indx;
4239 set_bit(buf[0].recipe_indx, rm->r_bitmap);
4240 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
4241 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
4242 memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
4243 sizeof(buf[0].recipe_bitmap));
4244 } else {
4245 status = -EINVAL;
4246 goto err_unroll;
4247 }
4248
4249
4250
4251
4252
4253
4254
4255
4256
4257
4258
4259 buf[0].content.act_ctrl_fwd_priority = rm->priority;
4260 } else {
4261 struct ice_recp_grp_entry *last_chain_entry;
4262 u16 rid, i;
4263
4264
4265
4266
4267 status = ice_alloc_recipe(hw, &rid);
4268 if (status)
4269 goto err_unroll;
4270
4271 buf[recps].recipe_indx = (u8)rid;
4272 buf[recps].content.rid = (u8)rid;
4273 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
4274
4275
4276
4277 last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw),
4278 sizeof(*last_chain_entry),
4279 GFP_KERNEL);
4280 if (!last_chain_entry) {
4281 status = -ENOMEM;
4282 goto err_unroll;
4283 }
4284 last_chain_entry->rid = rid;
4285 memset(&buf[recps].content.lkup_indx, 0,
4286 sizeof(buf[recps].content.lkup_indx));
4287
4288 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
4289 buf[recps].content.mask[0] =
4290 cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
4291 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
4292 buf[recps].content.lkup_indx[i] =
4293 ICE_AQ_RECIPE_LKUP_IGNORE;
4294 buf[recps].content.mask[i] = 0;
4295 }
4296
4297 i = 1;
4298
4299 set_bit(rid, rm->r_bitmap);
4300
4301
4302
4303 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
4304 list_for_each_entry(entry, &rm->rg_list, l_entry) {
4305 last_chain_entry->fv_idx[i] = entry->chain_idx;
4306 buf[recps].content.lkup_indx[i] = entry->chain_idx;
4307 buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF);
4308 set_bit(entry->rid, rm->r_bitmap);
4309 }
4310 list_add(&last_chain_entry->l_entry, &rm->rg_list);
4311 if (sizeof(buf[recps].recipe_bitmap) >=
4312 sizeof(rm->r_bitmap)) {
4313 memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
4314 sizeof(buf[recps].recipe_bitmap));
4315 } else {
4316 status = -EINVAL;
4317 goto err_unroll;
4318 }
4319 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
4320
4321 recps++;
4322 rm->root_rid = (u8)rid;
4323 }
4324 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
4325 if (status)
4326 goto err_unroll;
4327
4328 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
4329 ice_release_change_lock(hw);
4330 if (status)
4331 goto err_unroll;
4332
4333
4334
4335
4336 list_for_each_entry(entry, &rm->rg_list, l_entry) {
4337 struct ice_switch_info *sw = hw->switch_info;
4338 bool is_root, idx_found = false;
4339 struct ice_sw_recipe *recp;
4340 u16 idx, buf_idx = 0;
4341
4342
4343 for (idx = 0; idx < rm->n_grp_count; idx++)
4344 if (buf[idx].recipe_indx == entry->rid) {
4345 buf_idx = idx;
4346 idx_found = true;
4347 }
4348
4349 if (!idx_found) {
4350 status = -EIO;
4351 goto err_unroll;
4352 }
4353
4354 recp = &sw->recp_list[entry->rid];
4355 is_root = (rm->root_rid == entry->rid);
4356 recp->is_root = is_root;
4357
4358 recp->root_rid = entry->rid;
4359 recp->big_recp = (is_root && rm->n_grp_count > 1);
4360
4361 memcpy(&recp->ext_words, entry->r_group.pairs,
4362 entry->r_group.n_val_pairs * sizeof(struct ice_fv_word));
4363
4364 memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
4365 sizeof(recp->r_bitmap));
4366
4367
4368
4369
4370 ice_collect_result_idx(&buf[buf_idx], recp);
4371
4372
4373
4374
4375 if (!is_root)
4376 ice_collect_result_idx(&buf[buf_idx],
4377 &sw->recp_list[rm->root_rid]);
4378
4379 recp->n_ext_words = entry->r_group.n_val_pairs;
4380 recp->chain_idx = entry->chain_idx;
4381 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
4382 recp->n_grp_count = rm->n_grp_count;
4383 recp->tun_type = rm->tun_type;
4384 recp->recp_created = true;
4385 }
4386 rm->root_buf = buf;
4387 kfree(tmp);
4388 return status;
4389
4390err_unroll:
4391err_mem:
4392 kfree(tmp);
4393 devm_kfree(ice_hw_to_dev(hw), buf);
4394 return status;
4395}
4396
4397
4398
4399
4400
4401
4402
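/**
 * ice_create_recipe_group - creates a recipe group
 * @hw: pointer to hardware structure
 * @rm: recipe management list entry
 * @lkup_exts: lookup elements
 */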
4403static int
4404ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
4405 struct ice_prot_lkup_ext *lkup_exts)
4406{
4407 u8 recp_count = 0;
4408 int status;
4409
4410 rm->n_grp_count = 0;
4411
4412
4413
4414
4415 status = ice_create_first_fit_recp_def(hw, lkup_exts,
4416 &rm->rg_list, &recp_count);
4417 if (!status) {
4418 rm->n_grp_count += recp_count;
4419 rm->n_ext_words = lkup_exts->n_val_words;
4420 memcpy(&rm->ext_words, lkup_exts->fv_words,
4421 sizeof(rm->ext_words));
4422 memcpy(rm->word_masks, lkup_exts->field_mask,
4423 sizeof(rm->word_masks));
4424 }
4425
4426 return status;
4427}
4428
4429
4430
4431
4432
4433
4434
4435
4436
4437
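/**
 * ice_get_fv - get field vectors/extraction sequences for the lookup types
 * @hw: pointer to hardware structure
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *         structure per protocol header
 * @lkups_cnt: number of protocols
 * @bm: bitmap of field vectors to consider
 * @fv_list: pointer to a list that holds the returned field vectors
 */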
4438static int
4439ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
4440 unsigned long *bm, struct list_head *fv_list)
4441{
4442 u8 *prot_ids;
4443 int status;
4444 u16 i;
4445
4446 prot_ids = kcalloc(lkups_cnt, sizeof(*prot_ids), GFP_KERNEL);
4447 if (!prot_ids)
4448 return -ENOMEM;
4449
4450 for (i = 0; i < lkups_cnt; i++)
4451 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
4452 status = -EIO;
4453 goto free_mem;
4454 }
4455
4456
4457 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
4458
4459free_mem:
4460 kfree(prot_ids);
4461 return status;
4462}
4463
4464
4465
4466
4467
4468
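/**
 * ice_tun_type_match_word - determine if tunnel type needs a match mask
 * @tun_type: tunnel type
 * @mask: mask to be used for the tunnel flag
 */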
4469static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
4470{
4471 switch (tun_type) {
4472 case ICE_SW_TUN_GENEVE:
4473 case ICE_SW_TUN_VXLAN:
4474 case ICE_SW_TUN_NVGRE:
4475 *mask = ICE_TUN_FLAG_MASK;
4476 return true;
4477
4478 default:
4479 *mask = 0;
4480 return false;
4481 }
4482}
4483
4484
4485
4486
4487
4488
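/**
 * ice_add_special_words - Add words that are not protocols, such as metadata
 * @rinfo: other information regarding the rule e.g. priority and action info
 * @lkup_exts: lookup word structure
 */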
4489static int
4490ice_add_special_words(struct ice_adv_rule_info *rinfo,
4491 struct ice_prot_lkup_ext *lkup_exts)
4492{
4493 u16 mask;
4494
4495
4496
4497
4498 if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
4499 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
4500 u8 word = lkup_exts->n_val_words++;
4501
4502 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
4503 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
4504 lkup_exts->field_mask[word] = mask;
4505 } else {
4506 return -ENOSPC;
4507 }
4508 }
4509
4510 return 0;
4511}
4512
4513
4514
4515
4516
4517
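/**
 * ice_get_compat_fv_bitmap - get compatible field vector bitmap for a rule
 * @hw: pointer to hardware structure
 * @rinfo: other information regarding the rule e.g. priority and action info
 * @bm: pointer to memory for returning the bitmap of field vectors
 */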
4518static void
4519ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
4520 unsigned long *bm)
4521{
4522 enum ice_prof_type prof_type;
4523
4524 bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
4525
4526 switch (rinfo->tun_type) {
4527 case ICE_NON_TUN:
4528 prof_type = ICE_PROF_NON_TUN;
4529 break;
4530 case ICE_ALL_TUNNELS:
4531 prof_type = ICE_PROF_TUN_ALL;
4532 break;
4533 case ICE_SW_TUN_GENEVE:
4534 case ICE_SW_TUN_VXLAN:
4535 prof_type = ICE_PROF_TUN_UDP;
4536 break;
4537 case ICE_SW_TUN_NVGRE:
4538 prof_type = ICE_PROF_TUN_GRE;
4539 break;
4540 case ICE_SW_TUN_AND_NON_TUN:
4541 default:
4542 prof_type = ICE_PROF_ALL;
4543 break;
4544 }
4545
4546 ice_get_sw_fv_bitmap(hw, prof_type, bm);
4547}
4548
4549
4550
4551
4552
4553
4554
4555
4556
4557
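/**
 * ice_add_adv_recipe - Add an advanced recipe for the given lookups
 * @hw: pointer to hardware structure
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *         structure per protocol header
 * @lkups_cnt: number of protocols
 * @rinfo: other information regarding the rule e.g. priority and action info
 * @rid: return the recipe ID of the recipe created
 */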
4558static int
4559ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
4560 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
4561{
4562 DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES);
4563 DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES);
4564 struct ice_prot_lkup_ext *lkup_exts;
4565 struct ice_recp_grp_entry *r_entry;
4566 struct ice_sw_fv_list_entry *fvit;
4567 struct ice_recp_grp_entry *r_tmp;
4568 struct ice_sw_fv_list_entry *tmp;
4569 struct ice_sw_recipe *rm;
4570 int status = 0;
4571 u8 i;
4572
4573 if (!lkups_cnt)
4574 return -EINVAL;
4575
4576 lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL);
4577 if (!lkup_exts)
4578 return -ENOMEM;
4579
4580
4581
4582
4583 for (i = 0; i < lkups_cnt; i++) {
4584 u16 count;
4585
4586 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
4587 status = -EIO;
4588 goto err_free_lkup_exts;
4589 }
4590
4591 count = ice_fill_valid_words(&lkups[i], lkup_exts);
4592 if (!count) {
4593 status = -EIO;
4594 goto err_free_lkup_exts;
4595 }
4596 }
4597
4598 rm = kzalloc(sizeof(*rm), GFP_KERNEL);
4599 if (!rm) {
4600 status = -ENOMEM;
4601 goto err_free_lkup_exts;
4602 }
4603
4604
4605
4606
4607 INIT_LIST_HEAD(&rm->fv_list);
4608 INIT_LIST_HEAD(&rm->rg_list);
4609
4610
4611
4612
4613
4614 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
4615
4616 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
4617 if (status)
4618 goto err_unroll;
4619
4620
4621
4622
4623 status = ice_add_special_words(rinfo, lkup_exts);
4624 if (status)
4625 goto err_free_lkup_exts;
4626
4627
4628
4629
4630 status = ice_create_recipe_group(hw, rm, lkup_exts);
4631 if (status)
4632 goto err_unroll;
4633
4634
4635 rm->priority = (u8)rinfo->priority;
4636
4637
4638
4639
4640 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
4641 if (status)
4642 goto err_unroll;
4643
4644
4645 bitmap_zero(profiles, ICE_MAX_NUM_PROFILES);
4646 list_for_each_entry(fvit, &rm->fv_list, list_entry) {
4647 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
4648 set_bit((u16)fvit->profile_id, profiles);
4649 }
4650
4651
4652 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
4653 if (*rid < ICE_MAX_NUM_RECIPES)
4654
4655 goto err_unroll;
4656
4657 rm->tun_type = rinfo->tun_type;
4658
4659 status = ice_add_sw_recipe(hw, rm, profiles);
4660 if (status)
4661 goto err_unroll;
4662
4663
4664
4665
4666 list_for_each_entry(fvit, &rm->fv_list, list_entry) {
4667 DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
4668 u16 j;
4669
4670 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
4671 (u8 *)r_bitmap, NULL);
4672 if (status)
4673 goto err_unroll;
4674
4675 bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
4676 ICE_MAX_NUM_RECIPES);
4677 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
4678 if (status)
4679 goto err_unroll;
4680
4681 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
4682 (u8 *)r_bitmap,
4683 NULL);
4684 ice_release_change_lock(hw);
4685
4686 if (status)
4687 goto err_unroll;
4688
4689
4690 bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap,
4691 ICE_MAX_NUM_RECIPES);
4692
4693
4694 for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
4695 set_bit((u16)fvit->profile_id, recipe_to_profile[j]);
4696 }
4697
4698 *rid = rm->root_rid;
4699 memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts,
4700 sizeof(*lkup_exts));
4701err_unroll:
4702 list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
4703 list_del(&r_entry->l_entry);
4704 devm_kfree(ice_hw_to_dev(hw), r_entry);
4705 }
4706
4707 list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) {
4708 list_del(&fvit->list_entry);
4709 devm_kfree(ice_hw_to_dev(hw), fvit);
4710 }
4711
4712 if (rm->root_buf)
4713 devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
4714
4715 kfree(rm);
4716
4717err_free_lkup_exts:
4718 kfree(lkup_exts);
4719
4720 return status;
4721}
4722
4723
4724
4725
4726
4727
4728
4729
4730
4731
4732
4733
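/**
 * ice_find_dummy_packet - find a dummy packet
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *         structure per protocol header
 * @lkups_cnt: number of protocols
 * @tun_type: tunnel type
 * @pkt: dummy packet to fill according to filter match criteria
 * @pkt_len: packet length of dummy packet
 * @offsets: pointer to receive the offsets for the packet
 */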
4734static void
4735ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
4736 enum ice_sw_tunnel_type tun_type,
4737 const u8 **pkt, u16 *pkt_len,
4738 const struct ice_dummy_pkt_offsets **offsets)
4739{
4740 bool tcp = false, udp = false, ipv6 = false, vlan = false;
4741 u16 i;
4742
4743 for (i = 0; i < lkups_cnt; i++) {
4744 if (lkups[i].type == ICE_UDP_ILOS)
4745 udp = true;
4746 else if (lkups[i].type == ICE_TCP_IL)
4747 tcp = true;
4748 else if (lkups[i].type == ICE_IPV6_OFOS)
4749 ipv6 = true;
4750 else if (lkups[i].type == ICE_VLAN_OFOS)
4751 vlan = true;
4752 else if (lkups[i].type == ICE_ETYPE_OL &&
4753 lkups[i].h_u.ethertype.ethtype_id ==
4754 cpu_to_be16(ICE_IPV6_ETHER_ID) &&
4755 lkups[i].m_u.ethertype.ethtype_id ==
4756 cpu_to_be16(0xFFFF))
4757 ipv6 = true;
4758 }
4759
4760 if (tun_type == ICE_SW_TUN_NVGRE) {
4761 if (tcp) {
4762 *pkt = dummy_gre_tcp_packet;
4763 *pkt_len = sizeof(dummy_gre_tcp_packet);
4764 *offsets = dummy_gre_tcp_packet_offsets;
4765 return;
4766 }
4767
4768 *pkt = dummy_gre_udp_packet;
4769 *pkt_len = sizeof(dummy_gre_udp_packet);
4770 *offsets = dummy_gre_udp_packet_offsets;
4771 return;
4772 }
4773
4774 if (tun_type == ICE_SW_TUN_VXLAN ||
4775 tun_type == ICE_SW_TUN_GENEVE) {
4776 if (tcp) {
4777 *pkt = dummy_udp_tun_tcp_packet;
4778 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
4779 *offsets = dummy_udp_tun_tcp_packet_offsets;
4780 return;
4781 }
4782
4783 *pkt = dummy_udp_tun_udp_packet;
4784 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
4785 *offsets = dummy_udp_tun_udp_packet_offsets;
4786 return;
4787 }
4788
4789 if (udp && !ipv6) {
4790 if (vlan) {
4791 *pkt = dummy_vlan_udp_packet;
4792 *pkt_len = sizeof(dummy_vlan_udp_packet);
4793 *offsets = dummy_vlan_udp_packet_offsets;
4794 return;
4795 }
4796 *pkt = dummy_udp_packet;
4797 *pkt_len = sizeof(dummy_udp_packet);
4798 *offsets = dummy_udp_packet_offsets;
4799 return;
4800 } else if (udp && ipv6) {
4801 if (vlan) {
4802 *pkt = dummy_vlan_udp_ipv6_packet;
4803 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
4804 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
4805 return;
4806 }
4807 *pkt = dummy_udp_ipv6_packet;
4808 *pkt_len = sizeof(dummy_udp_ipv6_packet);
4809 *offsets = dummy_udp_ipv6_packet_offsets;
4810 return;
4811 } else if ((tcp && ipv6) || ipv6) {
4812 if (vlan) {
4813 *pkt = dummy_vlan_tcp_ipv6_packet;
4814 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
4815 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
4816 return;
4817 }
4818 *pkt = dummy_tcp_ipv6_packet;
4819 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
4820 *offsets = dummy_tcp_ipv6_packet_offsets;
4821 return;
4822 }
4823
4824 if (vlan) {
4825 *pkt = dummy_vlan_tcp_packet;
4826 *pkt_len = sizeof(dummy_vlan_tcp_packet);
4827 *offsets = dummy_vlan_tcp_packet_offsets;
4828 } else {
4829 *pkt = dummy_tcp_packet;
4830 *pkt_len = sizeof(dummy_tcp_packet);
4831 *offsets = dummy_tcp_packet_offsets;
4832 }
4833}
4834
4835
4836
4837
4838
4839
4840
4841
4842
4843
4844
4845
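/**
 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *         structure per protocol header
 * @lkups_cnt: number of protocols
 * @s_rule: stores rule information from the match criteria
 * @dummy_pkt: dummy packet to fill according to filter match criteria
 * @pkt_len: packet length of dummy packet
 * @offsets: offset info for the dummy packet
 */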
static int
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
			  struct ice_aqc_sw_rules_elem *s_rule,
			  const u8 *dummy_pkt, u16 pkt_len,
			  const struct ice_dummy_pkt_offsets *offsets)
{
	u8 *pkt;
	u16 i;

	/* Start with a packet of pre-defined/dummy content, then fill in the
	 * header fields that the caller wants to be looked up or matched.
	 */
	pkt = s_rule->pdata.lkup_tx_rx.hdr;

	memcpy(pkt, dummy_pkt, pkt_len);

	for (i = 0; i < lkups_cnt; i++) {
		enum ice_protocol_type type;
		u16 offset = 0, len = 0, j;
		bool found = false;

		/* find the start of this layer; it should be found since this
		 * was already checked when creating the recipe
		 */
		type = lkups[i].type;
		for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
			if (type == offsets[j].type) {
				offset = offsets[j].offset;
				found = true;
				break;
			}
		}
		/* this should never happen in a correct calling sequence */
		if (!found)
			return -EINVAL;

		switch (lkups[i].type) {
		case ICE_MAC_OFOS:
		case ICE_MAC_IL:
			len = sizeof(struct ice_ether_hdr);
			break;
		case ICE_ETYPE_OL:
			len = sizeof(struct ice_ethtype_hdr);
			break;
		case ICE_VLAN_OFOS:
			len = sizeof(struct ice_vlan_hdr);
			break;
		case ICE_IPV4_OFOS:
		case ICE_IPV4_IL:
			len = sizeof(struct ice_ipv4_hdr);
			break;
		case ICE_IPV6_OFOS:
		case ICE_IPV6_IL:
			len = sizeof(struct ice_ipv6_hdr);
			break;
		case ICE_TCP_IL:
		case ICE_UDP_OF:
		case ICE_UDP_ILOS:
			len = sizeof(struct ice_l4_hdr);
			break;
		case ICE_SCTP_IL:
			len = sizeof(struct ice_sctp_hdr);
			break;
		case ICE_NVGRE:
			len = sizeof(struct ice_nvgre_hdr);
			break;
		case ICE_VXLAN:
		case ICE_GENEVE:
			len = sizeof(struct ice_udp_tnl_hdr);
			break;
		default:
			return -EINVAL;
		}

		/* the length should be a word multiple */
		if (len % ICE_BYTES_PER_WORD)
			return -EIO;

		/* We have the offset to the layer start, its length, and the
		 * caller's header values and mask. Copy the data into the
		 * dummy packet word by word, writing only the bits indicated
		 * by the mask so that significant bytes already present in
		 * the dummy packet are not clobbered.
		 */
		for (j = 0; j < len / sizeof(u16); j++)
			if (((u16 *)&lkups[i].m_u)[j])
				((u16 *)(pkt + offset))[j] =
					(((u16 *)(pkt + offset))[j] &
					 ~((u16 *)&lkups[i].m_u)[j]) |
					(((u16 *)&lkups[i].h_u)[j] &
					 ((u16 *)&lkups[i].m_u)[j]);
	}

	s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(pkt_len);

	return 0;
}
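/**
 * ice_fill_adv_packet_tun - fill dummy packet with UDP tunnel port
 * @hw: pointer to the hardware structure
 * @tun_type: tunnel type of the rule being added
 * @pkt: dummy packet to fill in
 * @offsets: offset info for the dummy packet
 */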
static int
ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
			u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
{
	u16 open_port, i;

	switch (tun_type) {
	case ICE_SW_TUN_VXLAN:
		if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN))
			return -EIO;
		break;
	case ICE_SW_TUN_GENEVE:
		if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE))
			return -EIO;
		break;
	default:
		/* Nothing needs to be done for this tunnel type */
		return 0;
	}

	/* Find the outer UDP protocol header and insert the port number */
	for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
		if (offsets[i].type == ICE_UDP_OF) {
			struct ice_l4_hdr *hdr;
			u16 offset;

			offset = offsets[i].offset;
			hdr = (struct ice_l4_hdr *)&pkt[offset];
			hdr->dst_port = cpu_to_be16(open_port);

			return 0;
		}
	}

	return -EIO;
}
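/**
 * ice_find_adv_rule_entry - search for an advanced rule entry
 * @hw: pointer to the hardware structure
 * @lkups: lookup elements/match criteria for the advanced rule
 * @lkups_cnt: number of lookup elements
 * @recp_id: recipe ID for which we are finding the rule
 * @rinfo: other information regarding the rule e.g. priority and action info
 *
 * Returns a pointer to the entry storing the rule if found, otherwise NULL.
 */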
static struct ice_adv_fltr_mgmt_list_entry *
ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
			u16 lkups_cnt, u16 recp_id,
			struct ice_adv_rule_info *rinfo)
{
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct ice_switch_info *sw = hw->switch_info;
	int i;

	list_for_each_entry(list_itr, &sw->recp_list[recp_id].filt_rules,
			    list_entry) {
		bool lkups_matched = true;

		if (lkups_cnt != list_itr->lkups_cnt)
			continue;
		for (i = 0; i < list_itr->lkups_cnt; i++)
			if (memcmp(&list_itr->lkups[i], &lkups[i],
				   sizeof(*lkups))) {
				lkups_matched = false;
				break;
			}
		if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
		    rinfo->tun_type == list_itr->rule_info.tun_type &&
		    lkups_matched)
			return list_itr;
	}
	return NULL;
}
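/**
 * ice_adv_add_update_vsi_list - add or update the VSI list for an adv rule
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to the current filter management list entry
 * @cur_fltr: filter information of the rule already programmed
 * @new_fltr: filter information of the rule being added
 *
 * Creates a VSI list rule for the first additional VSI, or updates an
 * existing VSI list with the new VSI, and converts the programmed rule
 * from "forward to VSI" to "forward to VSI list" as needed.
 */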
static int
ice_adv_add_update_vsi_list(struct ice_hw *hw,
			    struct ice_adv_fltr_mgmt_list_entry *m_entry,
			    struct ice_adv_rule_info *cur_fltr,
			    struct ice_adv_rule_info *new_fltr)
{
	u16 vsi_list_id = 0;
	int status;

	if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
	    cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
	    cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
		return -EOPNOTSUPP;

	if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
		return -EOPNOTSUPP;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
		    new_fltr->sw_act.fwd_id.hw_vsi_id)
			return -EEXIST;

		vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
		vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  ICE_SW_LKUP_LAST);
		if (status)
			return status;

		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
		tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;

		/* Update the previous switch rule of "forward to VSI" to
		 * "forward to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);
	} else {
		u16 vsi_handle = new_fltr->sw_act.vsi_handle;

		if (!m_entry->vsi_list_info)
			return -EIO;

		/* A rule already exists with the new VSI being added */
		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
			return 0;

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false,
						  ice_aqc_opc_update_sw_rules,
						  ICE_SW_LKUP_LAST);
		/* track the new VSI in the VSI list mapping on success */
		if (!status)
			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
	}
	if (!status)
		m_entry->vsi_count++;
	return status;
}
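/**
 * ice_add_adv_rule - helper function to create an advanced switch rule
 * @hw: pointer to the hardware structure
 * @lkups: information on the words that need to be looked up. All words
 *	   together make one recipe
 * @lkups_cnt: number of entries in the lkups array
 * @rinfo: other information related to the rule that needs to be programmed
 * @added_entry: returns the recipe ID, rule ID and VSI handle that are needed
 *		 to remove the rule later
 *
 * Programs one rule per call: it reuses or creates the recipe, locates a
 * dummy packet matching the rule's tunnel type and protocols, fills it in
 * with the lookup values, and sends the rule to firmware.
 */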
int
ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
		 struct ice_rule_query_data *added_entry)
{
	struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
	u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
	const struct ice_dummy_pkt_offsets *pkt_offsets;
	struct ice_aqc_sw_rules_elem *s_rule = NULL;
	struct list_head *rule_head;
	struct ice_switch_info *sw;
	const u8 *pkt = NULL;
	u16 word_cnt;
	u32 act = 0;
	int status;
	u8 q_rgn;

	/* Initialize profile to result index bitmap */
	if (!hw->switch_info->prof_res_bm_init) {
		hw->switch_info->prof_res_bm_init = 1;
		ice_init_prof_result_bm(hw);
	}

	if (!lkups_cnt)
		return -EINVAL;

	/* get number of words we need to match */
	word_cnt = 0;
	for (i = 0; i < lkups_cnt; i++) {
		u16 j, *ptr;

		ptr = (u16 *)&lkups[i].m_u;
		for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
			if (ptr[j] != 0)
				word_cnt++;
	}

	if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
		return -EINVAL;

	/* locate a dummy packet for this tunnel type and set of protocols */
	ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
			      &pkt_offsets);
	if (!pkt) {
		status = -EINVAL;
		goto err_ice_add_adv_rule;
	}

	if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
	      rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
		return -EIO;

	vsi_handle = rinfo->sw_act.vsi_handle;
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		rinfo->sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, vsi_handle);
	if (rinfo->sw_act.flag & ICE_FLTR_TX)
		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);

	status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
	if (status)
		return status;
	m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	if (m_entry) {
		/* The same rule already exists; instead of programming a
		 * duplicate, add the new VSI to the existing rule's VSI list
		 * (which bumps vsi_count and may convert the rule to
		 * "forward to VSI list"), then report the existing rule's
		 * IDs back to the caller.
		 */
		status = ice_adv_add_update_vsi_list(hw, m_entry,
						     &m_entry->rule_info,
						     rinfo);
		if (added_entry) {
			added_entry->rid = rid;
			added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
			added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
		}
		return status;
	}
	rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
	s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
	if (!s_rule)
		return -ENOMEM;
	if (!rinfo->flags_info.act_valid) {
		act |= ICE_SINGLE_ACT_LAN_ENABLE;
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	} else {
		act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
						ICE_SINGLE_ACT_LB_ENABLE);
	}

	switch (rinfo->sw_act.fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
			ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_FWD_TO_QGRP:
		q_rgn = rinfo->sw_act.qgrp_size > 0 ?
			(u8)ilog2(rinfo->sw_act.qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
		       ICE_SINGLE_ACT_Q_REGION_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
		       ICE_SINGLE_ACT_VALID_BIT;
		break;
	default:
		status = -EIO;
		goto err_ice_add_adv_rule;
	}

	/* Rx rules are looked up against the physical port the packet
	 * arrives on, so use the local port as the source; Tx rules are
	 * looked up against the originating VSI, so use the VSI number
	 * resolved into rinfo->sw_act.src above.
	 */
	if (rinfo->rx) {
		s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
		s_rule->pdata.lkup_tx_rx.src =
			cpu_to_le16(hw->port_info->lport);
	} else {
		s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
		s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(rinfo->sw_act.src);
	}

	s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(rid);
	s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);

	status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
					   pkt_len, pkt_offsets);
	if (status)
		goto err_ice_add_adv_rule;

	if (rinfo->tun_type != ICE_NON_TUN &&
	    rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
		status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
						 s_rule->pdata.lkup_tx_rx.hdr,
						 pkt_offsets);
		if (status)
			goto err_ice_add_adv_rule;
	}

	status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
				 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
				 NULL);
	if (status)
		goto err_ice_add_adv_rule;
	adv_fltr = devm_kzalloc(ice_hw_to_dev(hw),
				sizeof(struct ice_adv_fltr_mgmt_list_entry),
				GFP_KERNEL);
	if (!adv_fltr) {
		status = -ENOMEM;
		goto err_ice_add_adv_rule;
	}

	adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups,
				       lkups_cnt * sizeof(*lkups), GFP_KERNEL);
	if (!adv_fltr->lkups) {
		status = -ENOMEM;
		goto err_ice_add_adv_rule;
	}

	adv_fltr->lkups_cnt = lkups_cnt;
	adv_fltr->rule_info = *rinfo;
	adv_fltr->rule_info.fltr_rule_id =
		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
	sw = hw->switch_info;
	sw->recp_list[rid].adv_rule = true;
	rule_head = &sw->recp_list[rid].filt_rules;

	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		adv_fltr->vsi_count = 1;

	/* Add rule entry to book keeping list */
	list_add(&adv_fltr->list_entry, rule_head);
	if (added_entry) {
		added_entry->rid = rid;
		added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
		added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
	}
err_ice_add_adv_rule:
	if (status && adv_fltr) {
		devm_kfree(ice_hw_to_dev(hw), adv_fltr->lkups);
		devm_kfree(ice_hw_to_dev(hw), adv_fltr);
	}

	kfree(s_rule);

	return status;
}
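/**
 * ice_replay_vsi_fltr - Replay filters for requested VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: driver VSI handle
 * @recp_id: recipe ID for which rules need to be replayed
 * @list_head: list for which filters need to be replayed
 *
 * Replays the filters of recipe recp_id for the VSI represented by
 * vsi_handle. A valid VSI handle must be passed in.
 */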
static int
ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
		    struct list_head *list_head)
{
	struct ice_fltr_mgmt_list_entry *itr;
	int status = 0;
	u16 hw_vsi_id;

	if (list_empty(list_head))
		return status;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	list_for_each_entry(itr, list_head, list_entry) {
		struct ice_fltr_list_entry f_entry;

		f_entry.fltr_info = itr->fltr_info;
		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
		    itr->fltr_info.vsi_handle == vsi_handle) {
			/* update the src in case it is VSI num */
			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
				f_entry.fltr_info.src = hw_vsi_id;
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
			if (status)
				goto end;
			continue;
		}
		if (!itr->vsi_list_info ||
		    !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
			continue;

		/* Clearing it so that the logic can add it back */
		clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
		f_entry.fltr_info.vsi_handle = vsi_handle;
		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
		/* update the src in case it is VSI num */
		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
			f_entry.fltr_info.src = hw_vsi_id;
		if (recp_id == ICE_SW_LKUP_VLAN)
			status = ice_add_vlan_internal(hw, &f_entry);
		else
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
		if (status)
			goto end;
	}
end:
	return status;
}
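/**
 * ice_adv_rem_update_vsi_list - remove a VSI from an advanced rule's VSI list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list needs updating
 */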
static int
ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			    struct ice_adv_fltr_mgmt_list_entry *fm_list)
{
	struct ice_vsi_list_map_info *vsi_list_info;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id;
	int status;

	if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return -EINVAL;

	/* The VSI being removed must currently be part of the VSI list */
	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
		return -ENOENT;

	lkup_type = ICE_SW_LKUP_LAST;
	vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
	vsi_list_info = fm_list->vsi_list_info;
	if (fm_list->vsi_count == 1) {
		struct ice_fltr_info tmp_fltr;
		u16 rem_vsi_handle;

		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
						ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return -EIO;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
		tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
		tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
		fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;

		/* Convert the rule from "forward to VSI list" back to a plain
		 * "forward to VSI" rule targeting the one remaining VSI
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr.fwd_id.hw_vsi_id, status);
			return status;
		}
		fm_list->vsi_list_info->ref_cnt--;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}
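/**
 * ice_rem_adv_rule - removes an existing advanced switch rule
 * @hw: pointer to the hardware structure
 * @lkups: information on the words that need to be looked up. All words
 *	   together make one recipe
 * @lkups_cnt: number of entries in the lkups array
 * @rinfo: other information related to the rule that is being removed
 *
 * Removes one rule at a time. The lkups array describes the "lookup" portion
 * of the rule and may span multiple protocol headers; rinfo carries the rest
 * of the rule information such as the forwarding action and VSI handle.
 */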
static int
ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
{
	struct ice_adv_fltr_mgmt_list_entry *list_elem;
	struct ice_prot_lkup_ext lkup_exts;
	bool remove_rule = false;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	u16 i, rid, vsi_handle;
	int status = 0;

	memset(&lkup_exts, 0, sizeof(lkup_exts));
	for (i = 0; i < lkups_cnt; i++) {
		u16 count;

		if (lkups[i].type >= ICE_PROTOCOL_LAST)
			return -EIO;

		count = ice_fill_valid_words(&lkups[i], &lkup_exts);
		if (!count)
			return -EIO;
	}

	/* Create any special protocol/offset pairs, which may only be defined
	 * at rule creation
	 */
	status = ice_add_special_words(rinfo, &lkup_exts);
	if (status)
		return status;

	rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
	/* If did not find a recipe that matches the existing criteria */
	if (rid == ICE_MAX_NUM_RECIPES)
		return -EINVAL;

	rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
	list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	/* the rule is already removed */
	if (!list_elem)
		return 0;
	mutex_lock(rule_lock);
	if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
		remove_rule = true;
	} else if (list_elem->vsi_count > 1) {
		remove_rule = false;
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
	} else {
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status) {
			mutex_unlock(rule_lock);
			return status;
		}
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}
	mutex_unlock(rule_lock);
	if (remove_rule) {
		struct ice_aqc_sw_rules_elem *s_rule;
		u16 rule_buf_sz;

		rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
		s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
		if (!s_rule)
			return -ENOMEM;
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			cpu_to_le16(list_elem->rule_info.fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
					 rule_buf_sz, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);
		if (!status || status == -ENOENT) {
			struct ice_switch_info *sw = hw->switch_info;

			mutex_lock(rule_lock);
			list_del(&list_elem->list_entry);
			devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
			devm_kfree(ice_hw_to_dev(hw), list_elem);
			mutex_unlock(rule_lock);
			if (list_empty(&sw->recp_list[rid].filt_rules))
				sw->recp_list[rid].adv_rule = false;
		}
		kfree(s_rule);
	}
	return status;
}
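/**
 * ice_rem_adv_rule_by_id - removes an existing advanced switch rule by ID
 * @hw: pointer to the hardware structure
 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
 *
 * Removes one rule at a time, based on the IDs passed in rather than the
 * match criteria.
 */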
int
ice_rem_adv_rule_by_id(struct ice_hw *hw,
		       struct ice_rule_query_data *remove_entry)
{
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct list_head *list_head;
	struct ice_adv_rule_info rinfo;
	struct ice_switch_info *sw;

	sw = hw->switch_info;
	if (!sw->recp_list[remove_entry->rid].recp_created)
		return -EINVAL;
	list_head = &sw->recp_list[remove_entry->rid].filt_rules;
	list_for_each_entry(list_itr, list_head, list_entry) {
		if (list_itr->rule_info.fltr_rule_id ==
		    remove_entry->rule_id) {
			rinfo = list_itr->rule_info;
			rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
			return ice_rem_adv_rule(hw, list_itr->lkups,
						list_itr->lkups_cnt, &rinfo);
		}
	}

	return -ENOENT;
}
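/**
 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
 *			      given VSI handle
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle for which all the rules need to be removed
 *
 * Removes all advanced rules associated with the given VSI; returns as soon
 * as removing a rule fails, otherwise returns success.
 */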
int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
	struct ice_vsi_list_map_info *map_info;
	struct ice_adv_rule_info rinfo;
	struct list_head *list_head;
	struct ice_switch_info *sw;
	int status;
	u8 rid;

	sw = hw->switch_info;
	for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
		if (!sw->recp_list[rid].recp_created)
			continue;
		if (!sw->recp_list[rid].adv_rule)
			continue;

		list_head = &sw->recp_list[rid].filt_rules;
		list_for_each_entry_safe(list_itr, tmp_entry, list_head,
					 list_entry) {
			rinfo = list_itr->rule_info;

			if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
				map_info = list_itr->vsi_list_info;
				if (!map_info)
					continue;

				if (!test_bit(vsi_handle, map_info->vsi_map))
					continue;
			} else if (rinfo.sw_act.vsi_handle != vsi_handle) {
				continue;
			}

			rinfo.sw_act.vsi_handle = vsi_handle;
			status = ice_rem_adv_rule(hw, list_itr->lkups,
						  list_itr->lkups_cnt, &rinfo);
			if (status)
				return status;
		}
	}
	return 0;
}
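/**
 * ice_replay_vsi_adv_rule - Replay advanced rules for requested VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: driver VSI handle
 * @list_head: list of advanced rules to be replayed
 *
 * Re-adds every advanced rule in @list_head whose action targets @vsi_handle.
 */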
static int
ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
			struct list_head *list_head)
{
	struct ice_rule_query_data added_entry = { 0 };
	struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
	int status = 0;

	if (list_empty(list_head))
		return status;
	list_for_each_entry(adv_fltr, list_head, list_entry) {
		struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
		u16 lk_cnt = adv_fltr->lkups_cnt;

		if (vsi_handle != rinfo->sw_act.vsi_handle)
			continue;
		status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
					  &added_entry);
		if (status)
			break;
	}
	return status;
}
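/**
 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
 * @hw: pointer to the hardware structure
 * @vsi_handle: driver VSI handle
 *
 * Replays both regular and advanced filters for the requested VSI.
 */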
int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_switch_info *sw = hw->switch_info;
	int status;
	u8 i;

	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct list_head *head;

		head = &sw->recp_list[i].filt_replay_rules;
		if (!sw->recp_list[i].adv_rule)
			status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
		else
			status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
		if (status)
			return status;
	}
	return status;
}
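/**
 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
 * @hw: pointer to the HW struct
 *
 * Deletes the filter replay rules for all recipes.
 */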
void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;

	if (!sw)
		return;

	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
			struct list_head *l_head;

			l_head = &sw->recp_list[i].filt_replay_rules;
			if (!sw->recp_list[i].adv_rule)
				ice_rem_sw_rule_info(hw, l_head);
			else
				ice_rem_adv_rule_info(hw, l_head);
		}
	}
}