1
2
3
4#include "ice_common.h"
5#include "ice_flow.h"
6
7
/* Describes one extractable flow field: the protocol header it belongs to,
 * plus its offset and size. Both off and size are stored in BITS (see the
 * ICE_FLOW_FLD_INFO initializer, which converts from bytes).
 */
struct ice_flow_field_info {
	enum ice_flow_seg_hdr hdr;	/* header containing the field */
	s16 off;			/* offset from start of header, in bits */
	u16 size;			/* field size, in bits */
};
13
/* Initializer for struct ice_flow_field_info: callers pass byte offsets and
 * sizes; the stored values are converted to bits.
 */
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
}
19
20
/* Properties of every supported flow field, indexed by enum ice_flow_field
 * (see the matching switch in ice_flow_xtract_fld()).
 */
static const
struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
	/* IPv4 source address (header bytes 12-15) */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, sizeof(struct in_addr)),
	/* IPv4 destination address (header bytes 16-19) */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, sizeof(struct in_addr)),
	/* IPv6 source address (header bytes 8-23) */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, sizeof(struct in6_addr)),
	/* IPv6 destination address (header bytes 24-39) */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, sizeof(struct in6_addr)),
	/* TCP source port */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, sizeof(__be16)),
	/* TCP destination port */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, sizeof(__be16)),
	/* UDP source port */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, sizeof(__be16)),
	/* UDP destination port */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, sizeof(__be16)),
	/* SCTP source port */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)),
	/* SCTP destination port */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)),
	/* GRE key field (12 bytes into the full GRE header) */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12,
			  sizeof_field(struct gre_full_hdr, key)),
};
50
51
52
53
54
/* Bitmap of packet types (one bit per PTYPE) with an outer/first IPv4 header
 * ("ofos" — presumably "outer first of stack"; confirm against the DDP
 * package docs). ANDed into params->ptypes by ice_flow_proc_seg_hdrs().
 */
static const u32 ice_ptypes_ipv4_ofos[] = {
	0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
65
66
/* Bitmap of packet types with an innermost ("il" — inner/last) IPv4 header;
 * used for non-first segments in ice_flow_proc_seg_hdrs().
 */
static const u32 ice_ptypes_ipv4_il[] = {
	0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
	0x0000000E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
77
78
/* Bitmap of packet types with an outer/first IPv6 header; used for the first
 * segment in ice_flow_proc_seg_hdrs().
 */
static const u32 ice_ptypes_ipv6_ofos[] = {
	0x00000000, 0x00000000, 0x77000000, 0x10002000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
89
90
/* Bitmap of packet types with an innermost IPv6 header; used for non-first
 * segments in ice_flow_proc_seg_hdrs().
 */
static const u32 ice_ptypes_ipv6_il[] = {
	0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
	0x00000770, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
101
102
103
104
/* Bitmap of packet types whose innermost L4 header is UDP */
static const u32 ice_ptypes_udp_il[] = {
	0x81000000, 0x20204040, 0x04000010, 0x80810102,
	0x00000040, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
115
116
/* Bitmap of packet types whose innermost L4 header is TCP */
static const u32 ice_ptypes_tcp_il[] = {
	0x04000000, 0x80810102, 0x10000040, 0x02040408,
	0x00000102, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
127
128
/* Bitmap of packet types whose innermost L4 header is SCTP */
static const u32 ice_ptypes_sctp_il[] = {
	0x08000000, 0x01020204, 0x20000081, 0x04080810,
	0x00000204, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
139
140
/* Bitmap of packet types with an outer GRE header; only consulted for the
 * first (outer) segment in ice_flow_proc_seg_hdrs().
 */
static const u32 ice_ptypes_gre_of[] = {
	0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
	0x0000017E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
151
152
/* Scratch state used while turning a flow profile's packet segments into a
 * hardware extraction sequence (built up by ice_flow_proc_segs() and its
 * helpers, then handed to ice_add_prof()).
 */
struct ice_flow_prof_params {
	enum ice_block blk;		/* HW classification block targeted */
	u16 entry_length;		/* NOTE(review): not used in this file chunk — confirm purpose */
	u8 es_cnt;			/* number of extract-sequence words filled so far */
	struct ice_flow_prof *prof;	/* profile being constructed */

	/* Extraction sequence: one (prot_id, off) word per extracted chunk;
	 * initialized to all-invalid by ice_flow_add_prof_sync().
	 */
	struct ice_fv_word es[ICE_MAX_FV_WORDS];
	/* Packet types the profile applies to; starts all-ones and is
	 * narrowed by bitmap_and in ice_flow_proc_seg_hdrs().
	 */
	DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX);
};
165
/* Header-flag groups that are mutually exclusive within a single segment:
 * at most one L3 and one L4 protocol may be set (enforced by
 * ice_flow_val_hdrs()).
 */
#define ICE_FLOW_SEG_HDRS_L3_MASK	\
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
#define ICE_FLOW_SEG_HDRS_L4_MASK	\
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
170
171
172
173
174
175
176static enum ice_status
177ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
178{
179 u8 i;
180
181 for (i = 0; i < segs_cnt; i++) {
182
183 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
184 !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
185 return ICE_ERR_PARAM;
186
187
188 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
189 !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
190 return ICE_ERR_PARAM;
191 }
192
193 return 0;
194}
195
196
/* On-wire protocol header sizes in bytes; summed by ice_flow_calc_seg_sz()
 * to find the total header length of a segment.
 */
#define ICE_FLOW_PROT_HDR_SZ_MAC	14
#define ICE_FLOW_PROT_HDR_SZ_IPV4	20
#define ICE_FLOW_PROT_HDR_SZ_IPV6	40
#define ICE_FLOW_PROT_HDR_SZ_TCP	20
#define ICE_FLOW_PROT_HDR_SZ_UDP	8
#define ICE_FLOW_PROT_HDR_SZ_SCTP	12
203
204
205
206
207
208
209static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
210{
211 u16 sz = ICE_FLOW_PROT_HDR_SZ_MAC;
212
213
214 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
215 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
216 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
217 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
218
219
220 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
221 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
222 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
223 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
224 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
225 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
226
227 return sz;
228}
229
230
231
232
233
234
235
236
237static enum ice_status
238ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
239{
240 struct ice_flow_prof *prof;
241 u8 i;
242
243 memset(params->ptypes, 0xff, sizeof(params->ptypes));
244
245 prof = params->prof;
246
247 for (i = 0; i < params->prof->segs_cnt; i++) {
248 const unsigned long *src;
249 u32 hdrs;
250
251 hdrs = prof->segs[i].hdrs;
252
253 if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
254 src = !i ? (const unsigned long *)ice_ptypes_ipv4_ofos :
255 (const unsigned long *)ice_ptypes_ipv4_il;
256 bitmap_and(params->ptypes, params->ptypes, src,
257 ICE_FLOW_PTYPE_MAX);
258 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
259 src = !i ? (const unsigned long *)ice_ptypes_ipv6_ofos :
260 (const unsigned long *)ice_ptypes_ipv6_il;
261 bitmap_and(params->ptypes, params->ptypes, src,
262 ICE_FLOW_PTYPE_MAX);
263 }
264
265 if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
266 src = (const unsigned long *)ice_ptypes_udp_il;
267 bitmap_and(params->ptypes, params->ptypes, src,
268 ICE_FLOW_PTYPE_MAX);
269 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
270 bitmap_and(params->ptypes, params->ptypes,
271 (const unsigned long *)ice_ptypes_tcp_il,
272 ICE_FLOW_PTYPE_MAX);
273 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
274 src = (const unsigned long *)ice_ptypes_sctp_il;
275 bitmap_and(params->ptypes, params->ptypes, src,
276 ICE_FLOW_PTYPE_MAX);
277 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
278 if (!i) {
279 src = (const unsigned long *)ice_ptypes_gre_of;
280 bitmap_and(params->ptypes, params->ptypes,
281 src, ICE_FLOW_PTYPE_MAX);
282 }
283 }
284 }
285
286 return 0;
287}
288
289
290
291
292
293
294
295
296
297
298
299
/**
 * ice_flow_xtract_fld - Create an extraction sequence entry for a given field
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @seg: packet segment index of the field to be extracted
 * @fld: ID of the field to be extracted
 *
 * Maps @fld to its protocol ID, records the field's word-aligned offset and
 * bit displacement in the segment's field info, then appends one extraction
 * word (ICE_FLOW_FV_EXTRACT_SZ bytes each) to params->es[] for every word
 * the field spans.
 */
static enum ice_status
ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
		    u8 seg, enum ice_flow_field fld)
{
	enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
	u8 fv_words = hw->blk[params->blk].es.fvw;
	struct ice_flow_fld_info *flds;
	u16 cnt, ese_bits, i;
	u16 off;

	flds = params->prof->segs[seg].fields;

	/* Outer (segment 0) and inner IP headers use distinct protocol IDs;
	 * L4 and GRE fields use the same ID regardless of segment.
	 */
	switch (fld) {
	case ICE_FLOW_FIELD_IDX_IPV4_SA:
	case ICE_FLOW_FIELD_IDX_IPV4_DA:
		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV6_SA:
	case ICE_FLOW_FIELD_IDX_IPV6_DA:
		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
		break;
	case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
		prot_id = ICE_PROT_TCP_IL;
		break;
	case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
		prot_id = ICE_PROT_UDP_IL_OR_S;
		break;
	case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
		prot_id = ICE_PROT_SCTP_IL;
		break;
	case ICE_FLOW_FIELD_IDX_GRE_KEYID:
		prot_id = ICE_PROT_GRE_OF;
		break;
	default:
		return ICE_ERR_NOT_IMPL;
	}

	/* Each extraction sequence entry is a word in size, and the fields
	 * are bit-offset based, so work out how the field maps onto words.
	 */
	ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;

	flds[fld].xtrct.prot_id = prot_id;
	/* Byte offset of the first extraction word covering the field */
	flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
		ICE_FLOW_FV_EXTRACT_SZ;
	/* Bit displacement of the field within that first word */
	flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
	flds[fld].xtrct.idx = params->es_cnt;

	/* Number of extraction words needed: displacement plus field size,
	 * rounded up to whole words.
	 */
	cnt = DIV_ROUND_UP(flds[fld].xtrct.disp + ice_flds_info[fld].size,
			   ese_bits);

	/* Fill in the extraction sequence entries needed for this field */
	off = flds[fld].xtrct.off;
	for (i = 0; i < cnt; i++) {
		u8 idx;

		/* Make sure the number of extraction sequence entries does
		 * not exceed the block's capacity.
		 */
		if (params->es_cnt >= fv_words)
			return ICE_ERR_MAX_LIMIT;

		/* Some blocks store the field vector in reverse order */
		if (hw->blk[params->blk].es.reverse)
			idx = fv_words - params->es_cnt - 1;
		else
			idx = params->es_cnt;

		params->es[idx].prot_id = prot_id;
		params->es[idx].off = off;
		params->es_cnt++;

		off += ICE_FLOW_FV_EXTRACT_SZ;
	}

	return 0;
}
383
384
385
386
387
388
389
390static enum ice_status
391ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
392 u8 seg)
393{
394 u16 fv_words;
395 u16 hdrs_sz;
396 u8 i;
397
398 if (!params->prof->segs[seg].raws_cnt)
399 return 0;
400
401 if (params->prof->segs[seg].raws_cnt >
402 ARRAY_SIZE(params->prof->segs[seg].raws))
403 return ICE_ERR_MAX_LIMIT;
404
405
406 hdrs_sz = ice_flow_calc_seg_sz(params, seg);
407 if (!hdrs_sz)
408 return ICE_ERR_PARAM;
409
410 fv_words = hw->blk[params->blk].es.fvw;
411
412 for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
413 struct ice_flow_seg_fld_raw *raw;
414 u16 off, cnt, j;
415
416 raw = ¶ms->prof->segs[seg].raws[i];
417
418
419 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
420 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
421 ICE_FLOW_FV_EXTRACT_SZ;
422 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
423 BITS_PER_BYTE;
424 raw->info.xtrct.idx = params->es_cnt;
425
426
427
428
429 cnt = DIV_ROUND_UP(raw->info.xtrct.disp +
430 (raw->info.src.last * BITS_PER_BYTE),
431 (ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE));
432 off = raw->info.xtrct.off;
433 for (j = 0; j < cnt; j++) {
434 u16 idx;
435
436
437
438
439 if (params->es_cnt >= hw->blk[params->blk].es.count ||
440 params->es_cnt >= ICE_MAX_FV_WORDS)
441 return ICE_ERR_MAX_LIMIT;
442
443
444 if (hw->blk[params->blk].es.reverse)
445 idx = fv_words - params->es_cnt - 1;
446 else
447 idx = params->es_cnt;
448
449 params->es[idx].prot_id = raw->info.xtrct.prot_id;
450 params->es[idx].off = off;
451 params->es_cnt++;
452 off += ICE_FLOW_FV_EXTRACT_SZ;
453 }
454 }
455
456 return 0;
457}
458
459
460
461
462
463
464
465
466
467static enum ice_status
468ice_flow_create_xtrct_seq(struct ice_hw *hw,
469 struct ice_flow_prof_params *params)
470{
471 struct ice_flow_prof *prof = params->prof;
472 enum ice_status status = 0;
473 u8 i;
474
475 for (i = 0; i < prof->segs_cnt; i++) {
476 u8 j;
477
478 for_each_set_bit(j, (unsigned long *)&prof->segs[i].match,
479 ICE_FLOW_FIELD_IDX_MAX) {
480 status = ice_flow_xtract_fld(hw, params, i,
481 (enum ice_flow_field)j);
482 if (status)
483 return status;
484 }
485
486
487 status = ice_flow_xtract_raws(hw, params, i);
488 if (status)
489 return status;
490 }
491
492 return status;
493}
494
495
496
497
498
499
500static enum ice_status
501ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
502{
503 enum ice_status status;
504
505 status = ice_flow_proc_seg_hdrs(params);
506 if (status)
507 return status;
508
509 status = ice_flow_create_xtrct_seq(hw, params);
510 if (status)
511 return status;
512
513 switch (params->blk) {
514 case ICE_BLK_FD:
515 case ICE_BLK_RSS:
516 status = 0;
517 break;
518 default:
519 return ICE_ERR_NOT_IMPL;
520 }
521
522 return status;
523}
524
/* Condition flags for ice_flow_find_prof_conds() */
#define ICE_FLOW_FIND_PROF_CHK_FLDS	0x00000001	/* matched fields must agree too */
#define ICE_FLOW_FIND_PROF_CHK_VSI	0x00000002	/* VSI must be associated with profile */
#define ICE_FLOW_FIND_PROF_NOT_CHK_DIR	0x00000004	/* ignore flow direction */
528
529
530
531
532
533
534
535
536
537
538
/**
 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @vsi_handle: software VSI handle, only consulted with CHK_VSI
 * @conds: additional match conditions (ICE_FLOW_FIND_PROF_* flags)
 *
 * Returns the first profile in hw->fl_profs[blk] whose direction (unless
 * NOT_CHK_DIR), segment count, per-segment headers, optionally matched
 * fields (CHK_FLDS), and optionally VSI association (CHK_VSI) all agree
 * with the arguments, or NULL when none does. Takes and releases
 * hw->fl_profs_locks[blk] internally.
 */
static struct ice_flow_prof *
ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
			 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
			 u8 segs_cnt, u16 vsi_handle, u32 conds)
{
	struct ice_flow_prof *p, *prof = NULL;

	mutex_lock(&hw->fl_profs_locks[blk]);
	list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
		if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
		    segs_cnt && segs_cnt == p->segs_cnt) {
			u8 i;

			/* Check for profile-VSI association if specified */
			if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
			    ice_is_vsi_valid(hw, vsi_handle) &&
			    !test_bit(vsi_handle, p->vsis))
				continue;

			/* Protocol headers must be checked. Matched fields are
			 * checked if specified.
			 */
			for (i = 0; i < segs_cnt; i++)
				if (segs[i].hdrs != p->segs[i].hdrs ||
				    ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
				     segs[i].match != p->segs[i].match))
					break;

			/* A match is found if all segments are matched */
			if (i == segs_cnt) {
				prof = p;
				break;
			}
		}
	mutex_unlock(&hw->fl_profs_locks[blk]);

	return prof;
}
577
578
579
580
581
582
583
584static struct ice_flow_prof *
585ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
586{
587 struct ice_flow_prof *p;
588
589 list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
590 if (p->id == prof_id)
591 return p;
592
593 return NULL;
594}
595
596
597
598
599
600
601static void
602ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
603{
604 if (!entry)
605 return;
606
607 if (entry->entry)
608 devm_kfree(ice_hw_to_dev(hw), entry->entry);
609
610 devm_kfree(ice_hw_to_dev(hw), entry);
611}
612
613
614
615
616
617
618
/**
 * ice_flow_rem_entry_sync - Remove a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage (unused here, kept for interface symmetry)
 * @entry: flow entry to be removed
 *
 * Caller is expected to hold the owning profile's entries_lock; the entry
 * is unlinked from the profile's list and then freed.
 */
static enum ice_status
ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
			struct ice_flow_entry *entry)
{
	if (!entry)
		return ICE_ERR_BAD_PTR;

	/* Unlink before freeing */
	list_del(&entry->l_entry);

	ice_dealloc_flow_entry(hw, entry);

	return 0;
}
632
633
634
635
636
637
638
639
640
641
642
643
644
645static enum ice_status
646ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
647 enum ice_flow_dir dir, u64 prof_id,
648 struct ice_flow_seg_info *segs, u8 segs_cnt,
649 struct ice_flow_prof **prof)
650{
651 struct ice_flow_prof_params params;
652 enum ice_status status;
653 u8 i;
654
655 if (!prof)
656 return ICE_ERR_BAD_PTR;
657
658 memset(¶ms, 0, sizeof(params));
659 params.prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params.prof),
660 GFP_KERNEL);
661 if (!params.prof)
662 return ICE_ERR_NO_MEMORY;
663
664
665 for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
666 params.es[i].prot_id = ICE_PROT_INVALID;
667 params.es[i].off = ICE_FV_OFFSET_INVAL;
668 }
669
670 params.blk = blk;
671 params.prof->id = prof_id;
672 params.prof->dir = dir;
673 params.prof->segs_cnt = segs_cnt;
674
675
676
677
678 for (i = 0; i < segs_cnt; i++)
679 memcpy(¶ms.prof->segs[i], &segs[i], sizeof(*segs));
680
681 status = ice_flow_proc_segs(hw, ¶ms);
682 if (status) {
683 ice_debug(hw, ICE_DBG_FLOW,
684 "Error processing a flow's packet segments\n");
685 goto out;
686 }
687
688
689 status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes, params.es);
690 if (status) {
691 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
692 goto out;
693 }
694
695 INIT_LIST_HEAD(¶ms.prof->entries);
696 mutex_init(¶ms.prof->entries_lock);
697 *prof = params.prof;
698
699out:
700 if (status)
701 devm_kfree(ice_hw_to_dev(hw), params.prof);
702
703 return status;
704}
705
706
707
708
709
710
711
712
713
/**
 * ice_flow_rem_prof_sync - remove a flow profile
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @prof: pointer to flow profile to remove
 *
 * Assumes the caller holds hw->fl_profs_locks[blk]. Removes all remaining
 * flow entries, then the HW profile; on success the profile is unlinked
 * and freed.
 *
 * NOTE(review): an error from ice_flow_rem_entry_sync() stops the entry
 * loop but is then overwritten by the ice_rem_prof() result below —
 * confirm this matches the intended error-reporting contract.
 */
static enum ice_status
ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       struct ice_flow_prof *prof)
{
	enum ice_status status;

	/* Remove all remaining flow entries before removing the profile */
	if (!list_empty(&prof->entries)) {
		struct ice_flow_entry *e, *t;

		mutex_lock(&prof->entries_lock);

		list_for_each_entry_safe(e, t, &prof->entries, l_entry) {
			status = ice_flow_rem_entry_sync(hw, blk, e);
			if (status)
				break;
		}

		mutex_unlock(&prof->entries_lock);
	}

	/* Remove all hardware profiles associated with this flow profile */
	status = ice_rem_prof(hw, blk, prof->id);
	if (!status) {
		list_del(&prof->l_entry);
		mutex_destroy(&prof->entries_lock);
		devm_kfree(ice_hw_to_dev(hw), prof);
	}

	return status;
}
745
746
747
748
749
750
751
752
753
754
755
756static enum ice_status
757ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
758 struct ice_flow_prof *prof, u16 vsi_handle)
759{
760 enum ice_status status = 0;
761
762 if (!test_bit(vsi_handle, prof->vsis)) {
763 status = ice_add_prof_id_flow(hw, blk,
764 ice_get_hw_vsi_num(hw,
765 vsi_handle),
766 prof->id);
767 if (!status)
768 set_bit(vsi_handle, prof->vsis);
769 else
770 ice_debug(hw, ICE_DBG_FLOW,
771 "HW profile add failed, %d\n",
772 status);
773 }
774
775 return status;
776}
777
778
779
780
781
782
783
784
785
786
787
788static enum ice_status
789ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
790 struct ice_flow_prof *prof, u16 vsi_handle)
791{
792 enum ice_status status = 0;
793
794 if (test_bit(vsi_handle, prof->vsis)) {
795 status = ice_rem_prof_id_flow(hw, blk,
796 ice_get_hw_vsi_num(hw,
797 vsi_handle),
798 prof->id);
799 if (!status)
800 clear_bit(vsi_handle, prof->vsis);
801 else
802 ice_debug(hw, ICE_DBG_FLOW,
803 "HW profile remove failed, %d\n",
804 status);
805 }
806
807 return status;
808}
809
810
811
812
813
814
815
816
817
818
819
820enum ice_status
821ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
822 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
823 struct ice_flow_prof **prof)
824{
825 enum ice_status status;
826
827 if (segs_cnt > ICE_FLOW_SEG_MAX)
828 return ICE_ERR_MAX_LIMIT;
829
830 if (!segs_cnt)
831 return ICE_ERR_PARAM;
832
833 if (!segs)
834 return ICE_ERR_BAD_PTR;
835
836 status = ice_flow_val_hdrs(segs, segs_cnt);
837 if (status)
838 return status;
839
840 mutex_lock(&hw->fl_profs_locks[blk]);
841
842 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
843 prof);
844 if (!status)
845 list_add(&(*prof)->l_entry, &hw->fl_profs[blk]);
846
847 mutex_unlock(&hw->fl_profs_locks[blk]);
848
849 return status;
850}
851
852
853
854
855
856
857
858enum ice_status
859ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
860{
861 struct ice_flow_prof *prof;
862 enum ice_status status;
863
864 mutex_lock(&hw->fl_profs_locks[blk]);
865
866 prof = ice_flow_find_prof_id(hw, blk, prof_id);
867 if (!prof) {
868 status = ICE_ERR_DOES_NOT_EXIST;
869 goto out;
870 }
871
872
873 status = ice_flow_rem_prof_sync(hw, blk, prof);
874
875out:
876 mutex_unlock(&hw->fl_profs_locks[blk]);
877
878 return status;
879}
880
881
882
883
884
885
886
887
888
889
890
891
/**
 * ice_flow_add_entry - Add a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @prof_id: ID of the profile to add a new flow entry to
 * @entry_id: unique ID to identify this flow entry
 * @vsi_handle: software VSI handle for the flow entry
 * @prio: priority of the flow entry
 * @data: pointer to a data buffer containing flow entry's match values/masks
 * @entry_h: pointer to buffer that receives the new flow entry's handle
 *
 * Looks up the profile, allocates an entry, associates the VSI with the
 * profile, and links the entry into the profile's entry list. On any
 * failure the partially constructed entry is freed.
 */
enum ice_status
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
		   u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
		   void *data, u64 *entry_h)
{
	struct ice_flow_entry *e = NULL;
	struct ice_flow_prof *prof;
	enum ice_status status;

	/* No flow entry data is expected for RSS */
	if (!entry_h || (!data && blk != ICE_BLK_RSS))
		return ICE_ERR_BAD_PTR;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Profile lookup, entry allocation and VSI association all happen
	 * under the profile-list lock so the profile cannot disappear.
	 */
	mutex_lock(&hw->fl_profs_locks[blk]);

	prof = ice_flow_find_prof_id(hw, blk, prof_id);
	if (!prof) {
		status = ICE_ERR_DOES_NOT_EXIST;
	} else {
		/* Allocate memory for the entry being added and associate
		 * the VSI to the found flow profile
		 */
		e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL);
		if (!e)
			status = ICE_ERR_NO_MEMORY;
		else
			status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	}

	mutex_unlock(&hw->fl_profs_locks[blk]);
	if (status)
		goto out;

	e->id = entry_id;
	e->vsi_handle = vsi_handle;
	e->prof = prof;
	e->priority = prio;

	/* Only FD and RSS blocks are supported; other blocks bail out
	 * before the entry is linked anywhere.
	 */
	switch (blk) {
	case ICE_BLK_FD:
	case ICE_BLK_RSS:
		break;
	default:
		status = ICE_ERR_NOT_IMPL;
		goto out;
	}

	mutex_lock(&prof->entries_lock);
	list_add(&e->l_entry, &prof->entries);
	mutex_unlock(&prof->entries_lock);

	*entry_h = ICE_FLOW_ENTRY_HNDL(e);

out:
	/* Release a partially built entry on any failure path */
	if (status && e) {
		if (e->entry)
			devm_kfree(ice_hw_to_dev(hw), e->entry);
		devm_kfree(ice_hw_to_dev(hw), e);
	}

	return status;
}
957
958
959
960
961
962
963
964enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
965 u64 entry_h)
966{
967 struct ice_flow_entry *entry;
968 struct ice_flow_prof *prof;
969 enum ice_status status = 0;
970
971 if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
972 return ICE_ERR_PARAM;
973
974 entry = ICE_FLOW_ENTRY_PTR(entry_h);
975
976
977 prof = entry->prof;
978
979 if (prof) {
980 mutex_lock(&prof->entries_lock);
981 status = ice_flow_rem_entry_sync(hw, blk, entry);
982 mutex_unlock(&prof->entries_lock);
983 }
984
985 return status;
986}
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011static void
1012ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
1013 enum ice_flow_fld_match_type field_type, u16 val_loc,
1014 u16 mask_loc, u16 last_loc)
1015{
1016 u64 bit = BIT_ULL(fld);
1017
1018 seg->match |= bit;
1019 if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
1020 seg->range |= bit;
1021
1022 seg->fields[fld].type = field_type;
1023 seg->fields[fld].src.val = val_loc;
1024 seg->fields[fld].src.mask = mask_loc;
1025 seg->fields[fld].src.last = last_loc;
1026
1027 ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
1028}
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050void
1051ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
1052 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
1053{
1054 enum ice_flow_fld_match_type t = range ?
1055 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
1056
1057 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
1058}
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
/**
 * ice_flow_add_fld_raw - sources a raw field from entry's input buffer
 * @seg: packet segment the field being set belongs to
 * @off: offset of the raw field from the beginning of the segment, in bytes
 * @len: length of the raw pattern to be matched
 * @val_loc: location of the value to match from entry's input buffer
 * @mask_loc: location of mask value from entry's input buffer
 *
 * Note: raws_cnt is deliberately incremented even when raws[] is already
 * full — the overflow is detected and reported as ICE_ERR_MAX_LIMIT later,
 * in ice_flow_xtract_raws().
 */
void
ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
		     u16 val_loc, u16 mask_loc)
{
	if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
		seg->raws[seg->raws_cnt].off = off;
		seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
		seg->raws[seg->raws_cnt].info.src.val = val_loc;
		seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
		/* src.last carries the pattern length for raw fields */
		seg->raws[seg->raws_cnt].info.src.last = len;
	}

	/* Overflows of "raws" are handled as an error condition later in
	 * the flow (see ice_flow_xtract_raws()).
	 */
	seg->raws_cnt++;
}
1095
/* Header flags accepted for RSS configuration; anything outside
 * ICE_FLOW_RSS_SEG_HDR_VAL_MASKS is rejected by
 * ice_flow_set_rss_seg_info().
 */
#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)

#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)

#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
	(ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
	 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116static enum ice_status
1117ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
1118 u32 flow_hdr)
1119{
1120 u64 val;
1121 u8 i;
1122
1123 for_each_set_bit(i, (unsigned long *)&hash_fields,
1124 ICE_FLOW_FIELD_IDX_MAX)
1125 ice_flow_set_fld(segs, (enum ice_flow_field)i,
1126 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1127 ICE_FLOW_FLD_OFF_INVAL, false);
1128
1129 ICE_FLOW_SET_HDRS(segs, flow_hdr);
1130
1131 if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS)
1132 return ICE_ERR_PARAM;
1133
1134 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
1135 if (val && !is_power_of_2(val))
1136 return ICE_ERR_CFG;
1137
1138 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
1139 if (val && !is_power_of_2(val))
1140 return ICE_ERR_CFG;
1141
1142 return 0;
1143}
1144
1145
1146
1147
1148
1149
1150
1151
1152void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
1153{
1154 struct ice_rss_cfg *r, *tmp;
1155
1156 if (list_empty(&hw->rss_list_head))
1157 return;
1158
1159 mutex_lock(&hw->rss_locks);
1160 list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
1161 if (test_and_clear_bit(vsi_handle, r->vsis))
1162 if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
1163 list_del(&r->l_entry);
1164 devm_kfree(ice_hw_to_dev(hw), r);
1165 }
1166 mutex_unlock(&hw->rss_locks);
1167}
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
/**
 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 *
 * Iterates all RSS flow profiles and disassociates the VSI from each one
 * it is part of; a profile left with no VSIs is removed entirely. Stops at
 * the first error. Holds hw->rss_locks for the duration (ice_flow_rem_prof
 * additionally takes the block's profile-list lock internally).
 */
enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *p, *t;
	enum ice_status status = 0;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	if (list_empty(&hw->fl_profs[blk]))
		return 0;

	mutex_lock(&hw->rss_locks);
	list_for_each_entry_safe(p, t, &hw->fl_profs[blk], l_entry)
		if (test_bit(vsi_handle, p->vsis)) {
			status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
			if (status)
				break;

			/* Remove the profile once no VSI references it */
			if (bitmap_empty(p->vsis, ICE_MAX_VSI)) {
				status = ice_flow_rem_prof(hw, blk, p->id);
				if (status)
					break;
			}
		}
	mutex_unlock(&hw->rss_locks);

	return status;
}
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216static void
1217ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
1218{
1219 struct ice_rss_cfg *r, *tmp;
1220
1221
1222
1223
1224
1225 list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
1226 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
1227 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
1228 clear_bit(vsi_handle, r->vsis);
1229 if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
1230 list_del(&r->l_entry);
1231 devm_kfree(ice_hw_to_dev(hw), r);
1232 }
1233 return;
1234 }
1235}
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245static enum ice_status
1246ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
1247{
1248 struct ice_rss_cfg *r, *rss_cfg;
1249
1250 list_for_each_entry(r, &hw->rss_list_head, l_entry)
1251 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
1252 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
1253 set_bit(vsi_handle, r->vsis);
1254 return 0;
1255 }
1256
1257 rss_cfg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rss_cfg),
1258 GFP_KERNEL);
1259 if (!rss_cfg)
1260 return ICE_ERR_NO_MEMORY;
1261
1262 rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
1263 rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
1264 set_bit(vsi_handle, rss_cfg->vsis);
1265
1266 list_add_tail(&rss_cfg->l_entry, &hw->rss_list_head);
1267
1268 return 0;
1269}
1270
/* Layout of the 64-bit RSS profile ID built by ICE_FLOW_GEN_PROFID:
 * bits 0-31 carry the hash-field mask, bits 32-61 the packet-header flags,
 * and bit 63 is set when the configuration targets inner (encapsulated)
 * headers (i.e. more than one segment).
 */
#define ICE_FLOW_PROF_HASH_S	0
#define ICE_FLOW_PROF_HASH_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
#define ICE_FLOW_PROF_HDR_S	32
#define ICE_FLOW_PROF_HDR_M	(0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
#define ICE_FLOW_PROF_ENCAP_S	63
#define ICE_FLOW_PROF_ENCAP_M	(BIT_ULL(ICE_FLOW_PROF_ENCAP_S))

/* Segment counts for outer-only vs outer+inner RSS configurations */
#define ICE_RSS_OUTER_HEADERS	1
#define ICE_RSS_INNER_HEADERS	2

/* Flow profile ID format:
 * [0:31] - Packet match fields
 * [32:62] - Protocol header
 * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
 */
#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
	(u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
	      (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
	      ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
/**
 * ice_add_rss_cfg_sync - add an RSS configuration
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
 * @addl_hdrs: protocol header fields
 * @segs_cnt: packet segment count
 *
 * Assumes the caller holds hw->rss_locks. Proceeds in three lookups:
 *  1) a profile with identical fields already containing the VSI — done;
 *  2) any profile containing the VSI — detach the VSI from it (removing
 *     the profile if it becomes empty) since a VSI can only be part of
 *     one RSS profile per field set;
 *  3) a profile with identical fields but without the VSI — join it.
 * Otherwise a brand new profile is created and the VSI associated.
 */
static enum ice_status
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
		     u32 addl_hdrs, u8 segs_cnt)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *prof = NULL;
	struct ice_flow_seg_info *segs;
	enum ice_status status;

	if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
		return ICE_ERR_PARAM;

	segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
	if (!segs)
		return ICE_ERR_NO_MEMORY;

	/* Construct the packet segment info from the hashed fields */
	status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
					   addl_hdrs);
	if (status)
		goto exit;

	/* Search for a flow profile that has matching headers, hash fields
	 * and has the input VSI associated to it. If found, no further
	 * operations required and exit.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS |
					ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof)
		goto exit;

	/* Check if a flow profile exists with the same protocol headers and
	 * associated with the input VSI. If so disassociate the VSI from
	 * this profile. The VSI will be added to a new profile created with
	 * the protocol header and new hash field configuration.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof) {
		status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
		if (!status)
			ice_rem_rss_list(hw, vsi_handle, prof);
		else
			goto exit;

		/* Remove profile if it has no VSIs associated */
		if (bitmap_empty(prof->vsis, ICE_MAX_VSI)) {
			status = ice_flow_rem_prof(hw, blk, prof->id);
			if (status)
				goto exit;
		}
	}

	/* Search for a profile that has same match fields only. If this
	 * exists then associate the VSI to this profile.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS);
	if (prof) {
		status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
		if (!status)
			status = ice_add_rss_list(hw, vsi_handle, prof);
		goto exit;
	}

	/* Create a new flow profile with generated profile and packet
	 * segment information.
	 */
	status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
				   ICE_FLOW_GEN_PROFID(hashed_flds,
						       segs[segs_cnt - 1].hdrs,
						       segs_cnt),
				   segs, segs_cnt, &prof);
	if (status)
		goto exit;

	status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	/* If association to a new flow profile failed then this profile can
	 * be removed.
	 */
	if (status) {
		ice_flow_rem_prof(hw, blk, prof->id);
		goto exit;
	}

	status = ice_add_rss_list(hw, vsi_handle, prof);

exit:
	kfree(segs);
	return status;
}
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407enum ice_status
1408ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
1409 u32 addl_hdrs)
1410{
1411 enum ice_status status;
1412
1413 if (hashed_flds == ICE_HASH_INVALID ||
1414 !ice_is_vsi_valid(hw, vsi_handle))
1415 return ICE_ERR_PARAM;
1416
1417 mutex_lock(&hw->rss_locks);
1418 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
1419 ICE_RSS_OUTER_HEADERS);
1420 if (!status)
1421 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
1422 addl_hdrs, ICE_RSS_INNER_HEADERS);
1423 mutex_unlock(&hw->rss_locks);
1424
1425 return status;
1426}
1427
1428
1429
1430
1431
/* Mapping of AVF (virtual function) hash bit fields to groups; used by
 * ice_add_avf_rss_cfg() to translate a VF's requested hash configuration
 * into ICE_FLOW_HASH_* field sets.
 */
#define ICE_FLOW_AVF_RSS_IPV4_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4))
#define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
#define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP))
#define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \
	(ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \
	 ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP))

#define ICE_FLOW_AVF_RSS_IPV6_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6))
#define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP))
#define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP))
#define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \
	(ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \
	 ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP))
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
/**
 * ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
 *
 * Takes the hash bitmap provided by an AVF (virtual function) driver,
 * translates it to the ICE hash-field equivalents, and installs one RSS
 * configuration per translated flow type via ice_add_rss_cfg().
 */
enum ice_status
ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
{
	enum ice_status status = 0;
	u64 hash_flds;

	if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
	    !ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Make sure no unsupported bits are specified */
	if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
			 ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
		return ICE_ERR_CFG;

	hash_flds = avf_hash;

	/* Always create an L3 RSS configuration for any L4 RSS request:
	 * if any IPv4/IPv6 bit is set, also fold in the plain-L3 bits.
	 */
	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS)
		hash_flds |= ICE_FLOW_AVF_RSS_IPV4_MASKS;

	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)
		hash_flds |= ICE_FLOW_AVF_RSS_IPV6_MASKS;

	/* Create the corresponding RSS configuration for each valid hash bit.
	 * Each iteration picks one group (L3, TCP, UDP, or SCTP — in that
	 * priority order), clears its bits, and configures it, until no
	 * requested bits remain.
	 */
	while (hash_flds) {
		u64 rss_hash = ICE_HASH_INVALID;

		if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS) {
			if (hash_flds & ICE_FLOW_AVF_RSS_IPV4_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV4;
				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV4_MASKS;
			} else if (hash_flds &
				   ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV4 |
					ICE_FLOW_HASH_TCP_PORT;
				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS;
			} else if (hash_flds &
				   ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV4 |
					ICE_FLOW_HASH_UDP_PORT;
				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS;
			} else if (hash_flds &
				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) {
				rss_hash = ICE_FLOW_HASH_IPV4 |
					ICE_FLOW_HASH_SCTP_PORT;
				hash_flds &=
					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP);
			}
		} else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) {
			if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV6;
				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV6_MASKS;
			} else if (hash_flds &
				   ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV6 |
					ICE_FLOW_HASH_TCP_PORT;
				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS;
			} else if (hash_flds &
				   ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV6 |
					ICE_FLOW_HASH_UDP_PORT;
				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS;
			} else if (hash_flds &
				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) {
				rss_hash = ICE_FLOW_HASH_IPV6 |
					ICE_FLOW_HASH_SCTP_PORT;
				hash_flds &=
					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP);
			}
		}

		/* No branch matched: remaining bits cannot be translated.
		 * (The validation above should prevent this.)
		 */
		if (rss_hash == ICE_HASH_INVALID)
			return ICE_ERR_OUT_OF_RANGE;

		status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
					 ICE_FLOW_SEG_HDR_NONE);
		if (status)
			break;
	}

	return status;
}
1553
1554
1555
1556
1557
1558
1559enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
1560{
1561 enum ice_status status = 0;
1562 struct ice_rss_cfg *r;
1563
1564 if (!ice_is_vsi_valid(hw, vsi_handle))
1565 return ICE_ERR_PARAM;
1566
1567 mutex_lock(&hw->rss_locks);
1568 list_for_each_entry(r, &hw->rss_list_head, l_entry) {
1569 if (test_bit(vsi_handle, r->vsis)) {
1570 status = ice_add_rss_cfg_sync(hw, vsi_handle,
1571 r->hashed_flds,
1572 r->packet_hdr,
1573 ICE_RSS_OUTER_HEADERS);
1574 if (status)
1575 break;
1576 status = ice_add_rss_cfg_sync(hw, vsi_handle,
1577 r->hashed_flds,
1578 r->packet_hdr,
1579 ICE_RSS_INNER_HEADERS);
1580 if (status)
1581 break;
1582 }
1583 }
1584 mutex_unlock(&hw->rss_locks);
1585
1586 return status;
1587}
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
1599{
1600 u64 rss_hash = ICE_HASH_INVALID;
1601 struct ice_rss_cfg *r;
1602
1603
1604 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
1605 return ICE_HASH_INVALID;
1606
1607 mutex_lock(&hw->rss_locks);
1608 list_for_each_entry(r, &hw->rss_list_head, l_entry)
1609 if (test_bit(vsi_handle, r->vsis) &&
1610 r->packet_hdr == hdrs) {
1611 rss_hash = r->hashed_flds;
1612 break;
1613 }
1614 mutex_unlock(&hw->rss_locks);
1615
1616 return rss_hash;
1617}
1618