1
2
3
4#include "ice_common.h"
5#include "ice_flow.h"
6
7
/* Describes one extractable protocol field: the protocol header it lives in
 * plus its offset and size, both expressed in bits (see ICE_FLOW_FLD_INFO).
 */
struct ice_flow_field_info {
	enum ice_flow_seg_hdr hdr;
	s16 off;	/* header offset, in bits */
	u16 size;	/* field size, in bits */
};
13
/* Initializer for struct ice_flow_field_info; callers pass byte offsets and
 * sizes, which are converted to bits for storage.
 */
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
}
19
20
/* Table containing properties of supported protocol header fields, indexed
 * by enum ice_flow_field (entry order must match that enum).
 */
static const
struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
	/* IPv4 source address */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, sizeof(struct in_addr)),
	/* IPv4 destination address */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, sizeof(struct in_addr)),
	/* IPv6 source address */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, sizeof(struct in6_addr)),
	/* IPv6 destination address */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, sizeof(struct in6_addr)),
	/* TCP source port */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, sizeof(__be16)),
	/* TCP destination port */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, sizeof(__be16)),
	/* UDP source port */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, sizeof(__be16)),
	/* UDP destination port */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, sizeof(__be16)),
	/* SCTP source port */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)),
	/* SCTP destination port */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)),
	/* GRE key field */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12,
			  sizeof_field(struct gre_full_hdr, key)),
};
50
51
52
53
54
/* Packet-type bitmap for outermost/first IPv4 header; AND'ed into
 * params->ptypes by ice_flow_proc_seg_hdrs().
 */
static const u32 ice_ptypes_ipv4_ofos[] = {
	0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
65
66
/* Packet-type bitmap for innermost/last IPv4 header */
static const u32 ice_ptypes_ipv4_il[] = {
	0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
	0x0000000E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
77
78
/* Packet-type bitmap for outermost/first IPv6 header */
static const u32 ice_ptypes_ipv6_ofos[] = {
	0x00000000, 0x00000000, 0x77000000, 0x10002000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
89
90
/* Packet-type bitmap for innermost/last IPv6 header */
static const u32 ice_ptypes_ipv6_il[] = {
	0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
	0x00000770, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
101
102
/* Packet-type bitmap for outermost/first IPv4 header with no L4 header */
static const u32 ice_ipv4_ofos_no_l4[] = {
	0x10C00000, 0x04000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
113
114
/* Packet-type bitmap for innermost/last IPv4 header with no L4 header */
static const u32 ice_ipv4_il_no_l4[] = {
	0x60000000, 0x18043008, 0x80000002, 0x6010c021,
	0x00000008, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
125
126
/* Packet-type bitmap for outermost/first IPv6 header with no L4 header */
static const u32 ice_ipv6_ofos_no_l4[] = {
	0x00000000, 0x00000000, 0x43000000, 0x10002000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
137
138
/* Packet-type bitmap for innermost/last IPv6 header with no L4 header */
static const u32 ice_ipv6_il_no_l4[] = {
	0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
	0x00000430, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
149
150
151
152
/* Packet-type bitmap for packets with a UDP header */
static const u32 ice_ptypes_udp_il[] = {
	0x81000000, 0x20204040, 0x04000010, 0x80810102,
	0x00000040, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
163
164
/* Packet-type bitmap for packets with a TCP header */
static const u32 ice_ptypes_tcp_il[] = {
	0x04000000, 0x80810102, 0x10000040, 0x02040408,
	0x00000102, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
175
176
/* Packet-type bitmap for packets with an SCTP header */
static const u32 ice_ptypes_sctp_il[] = {
	0x08000000, 0x01020204, 0x20000081, 0x04080810,
	0x00000204, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
187
188
/* Packet-type bitmap for packets with a GRE header (outer/first segment
 * only — see ice_flow_proc_seg_hdrs()).
 */
static const u32 ice_ptypes_gre_of[] = {
	0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
	0x0000017E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
199
200
/* Working state used while building a flow profile: the classification
 * block, the profile under construction, the extraction sequence derived
 * from its segments, and the set of packet types the profile will match.
 */
struct ice_flow_prof_params {
	enum ice_block blk;
	u16 entry_length;	/* # of bytes a formatted entry requires — appears unused in this chunk */
	u8 es_cnt;		/* # of extraction-sequence words filled so far */
	struct ice_flow_prof *prof;

	/* es[] is the extraction sequence handed to ice_add_prof(); entries
	 * are pre-initialized to ICE_PROT_INVALID / ICE_FV_OFFSET_INVAL.
	 */
	struct ice_fv_word es[ICE_MAX_FV_WORDS];
	DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX);
};
213
/* All supported L3 header flags */
#define ICE_FLOW_SEG_HDRS_L3_MASK \
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
/* All supported L4 header flags */
#define ICE_FLOW_SEG_HDRS_L4_MASK \
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
218
219
220
221
222
223
224static enum ice_status
225ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
226{
227 u8 i;
228
229 for (i = 0; i < segs_cnt; i++) {
230
231 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
232 !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
233 return ICE_ERR_PARAM;
234
235
236 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
237 !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
238 return ICE_ERR_PARAM;
239 }
240
241 return 0;
242}
243
244
/* Sizes of fixed known protocol headers without header options, in bytes */
#define ICE_FLOW_PROT_HDR_SZ_MAC 14
#define ICE_FLOW_PROT_HDR_SZ_IPV4 20
#define ICE_FLOW_PROT_HDR_SZ_IPV6 40
#define ICE_FLOW_PROT_HDR_SZ_TCP 20
#define ICE_FLOW_PROT_HDR_SZ_UDP 8
#define ICE_FLOW_PROT_HDR_SZ_SCTP 12
251
252
253
254
255
256
257static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
258{
259 u16 sz = ICE_FLOW_PROT_HDR_SZ_MAC;
260
261
262 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
263 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
264 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
265 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
266
267
268 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
269 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
270 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
271 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
272 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
273 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
274
275 return sz;
276}
277
278
279
280
281
282
283
284
/**
 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
 * @params: information about the flow to be processed
 *
 * Starts from an all-ones packet-type bitmap and successively ANDs it with
 * the PTYPE table for each protocol header present in each segment, leaving
 * only packet types that match every segment's headers.
 */
static enum ice_status
ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
{
	struct ice_flow_prof *prof;
	u8 i;

	memset(params->ptypes, 0xff, sizeof(params->ptypes));

	prof = params->prof;

	for (i = 0; i < params->prof->segs_cnt; i++) {
		const unsigned long *src;
		u32 hdrs;

		hdrs = prof->segs[i].hdrs;

		/* L3: segment 0 uses the outer/first tables, later segments
		 * the inner/last ones; the *_no_l4 tables apply when the IP
		 * header is not accompanied by any L4 header.
		 */
		if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
		    !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) {
			src = !i ? (const unsigned long *)ice_ipv4_ofos_no_l4 :
				(const unsigned long *)ice_ipv4_il_no_l4;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
			src = !i ? (const unsigned long *)ice_ptypes_ipv4_ofos :
				(const unsigned long *)ice_ptypes_ipv4_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
			   !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) {
			src = !i ? (const unsigned long *)ice_ipv6_ofos_no_l4 :
				(const unsigned long *)ice_ipv6_il_no_l4;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
			src = !i ? (const unsigned long *)ice_ptypes_ipv6_ofos :
				(const unsigned long *)ice_ptypes_ipv6_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		}

		/* L4 / tunnel: note GRE is only considered when no L4 flag is
		 * set, and only for the outermost segment (i == 0).
		 */
		if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
			src = (const unsigned long *)ice_ptypes_udp_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
			bitmap_and(params->ptypes, params->ptypes,
				   (const unsigned long *)ice_ptypes_tcp_il,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
			src = (const unsigned long *)ice_ptypes_sctp_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
			if (!i) {
				src = (const unsigned long *)ice_ptypes_gre_of;
				bitmap_and(params->ptypes, params->ptypes,
					   src, ICE_FLOW_PTYPE_MAX);
			}
		}
	}

	return 0;
}
348
349
350
351
352
353
354
355
356
357
358
359
/**
 * ice_flow_xtract_fld - Create an extraction sequence entry for a given field
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @seg: packet segment index of the field to be extracted
 * @fld: ID of field to be extracted
 *
 * This function determines the protocol ID, offset, and size of the given
 * field and fills in the extraction-sequence entries needed to cover it.
 * Returns ICE_ERR_NOT_IMPL for unsupported fields and ICE_ERR_MAX_LIMIT
 * when the extraction sequence is full.
 */
static enum ice_status
ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
		    u8 seg, enum ice_flow_field fld)
{
	enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
	u8 fv_words = hw->blk[params->blk].es.fvw;
	struct ice_flow_fld_info *flds;
	u16 cnt, ese_bits, i;
	u16 off;

	flds = params->prof->segs[seg].fields;

	switch (fld) {
	case ICE_FLOW_FIELD_IDX_IPV4_SA:
	case ICE_FLOW_FIELD_IDX_IPV4_DA:
		/* outer/first segment vs inner/last segment protocol ID */
		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV6_SA:
	case ICE_FLOW_FIELD_IDX_IPV6_DA:
		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
		break;
	case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
		prot_id = ICE_PROT_TCP_IL;
		break;
	case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
		prot_id = ICE_PROT_UDP_IL_OR_S;
		break;
	case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
		prot_id = ICE_PROT_SCTP_IL;
		break;
	case ICE_FLOW_FIELD_IDX_GRE_KEYID:
		prot_id = ICE_PROT_GRE_OF;
		break;
	default:
		return ICE_ERR_NOT_IMPL;
	}

	/* Each extraction sequence entry is ICE_FLOW_FV_EXTRACT_SZ bytes and
	 * its offset is kept aligned to that size; "disp" holds the bit
	 * displacement of the field inside the first word.
	 */
	ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;

	flds[fld].xtrct.prot_id = prot_id;
	flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
		ICE_FLOW_FV_EXTRACT_SZ;
	flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
	flds[fld].xtrct.idx = params->es_cnt;

	/* Number of extraction-sequence words required to cover the
	 * displaced field.
	 */
	cnt = DIV_ROUND_UP(flds[fld].xtrct.disp + ice_flds_info[fld].size,
			   ese_bits);

	/* Fill the extraction sequence entries needed for this field */
	off = flds[fld].xtrct.off;
	for (i = 0; i < cnt; i++) {
		u8 idx;

		/* Make sure the number of extraction sequence entries
		 * does not exceed the block's capability.
		 */
		if (params->es_cnt >= fv_words)
			return ICE_ERR_MAX_LIMIT;

		/* some blocks require the extraction sequence in reverse */
		if (hw->blk[params->blk].es.reverse)
			idx = fv_words - params->es_cnt - 1;
		else
			idx = params->es_cnt;

		params->es[idx].prot_id = prot_id;
		params->es[idx].off = off;
		params->es_cnt++;

		off += ICE_FLOW_FV_EXTRACT_SZ;
	}

	return 0;
}
443
444
445
446
447
448
449
/**
 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @seg: index of packet segment whose raw fields are to be extracted
 *
 * Raw fields are matched at byte offsets from the start of the packet;
 * the MAC protocol ID is used as the extraction anchor. A raws_cnt that
 * overflowed in ice_flow_add_fld_raw() is rejected here with
 * ICE_ERR_MAX_LIMIT.
 */
static enum ice_status
ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
		     u8 seg)
{
	u16 fv_words;
	u16 hdrs_sz;
	u8 i;

	if (!params->prof->segs[seg].raws_cnt)
		return 0;

	if (params->prof->segs[seg].raws_cnt >
	    ARRAY_SIZE(params->prof->segs[seg].raws))
		return ICE_ERR_MAX_LIMIT;

	/* Offsets within the segment headers are not supported */
	hdrs_sz = ice_flow_calc_seg_sz(params, seg);
	if (!hdrs_sz)
		return ICE_ERR_PARAM;

	fv_words = hw->blk[params->blk].es.fvw;

	for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
		struct ice_flow_seg_fld_raw *raw;
		u16 off, cnt, j;

		raw = &params->prof->segs[seg].raws[i];

		/* Word-align the starting offset; "disp" keeps the bit
		 * displacement of the raw field inside the first word.
		 */
		raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
		raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
			ICE_FLOW_FV_EXTRACT_SZ;
		raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
			BITS_PER_BYTE;
		raw->info.xtrct.idx = params->es_cnt;

		/* Words needed to cover "last" bytes starting at the
		 * displaced offset (src.last holds the raw field length).
		 */
		cnt = DIV_ROUND_UP(raw->info.xtrct.disp +
				   (raw->info.src.last * BITS_PER_BYTE),
				   (ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE));
		off = raw->info.xtrct.off;
		for (j = 0; j < cnt; j++) {
			u16 idx;

			/* Make sure the number of extraction sequence
			 * entries does not exceed the block's capability.
			 */
			if (params->es_cnt >= hw->blk[params->blk].es.count ||
			    params->es_cnt >= ICE_MAX_FV_WORDS)
				return ICE_ERR_MAX_LIMIT;

			/* some blocks require the sequence in reverse */
			if (hw->blk[params->blk].es.reverse)
				idx = fv_words - params->es_cnt - 1;
			else
				idx = params->es_cnt;

			params->es[idx].prot_id = raw->info.xtrct.prot_id;
			params->es[idx].off = off;
			params->es_cnt++;
			off += ICE_FLOW_FV_EXTRACT_SZ;
		}
	}

	return 0;
}
518
519
520
521
522
523
524
525
526
527static enum ice_status
528ice_flow_create_xtrct_seq(struct ice_hw *hw,
529 struct ice_flow_prof_params *params)
530{
531 struct ice_flow_prof *prof = params->prof;
532 enum ice_status status = 0;
533 u8 i;
534
535 for (i = 0; i < prof->segs_cnt; i++) {
536 u8 j;
537
538 for_each_set_bit(j, (unsigned long *)&prof->segs[i].match,
539 ICE_FLOW_FIELD_IDX_MAX) {
540 status = ice_flow_xtract_fld(hw, params, i,
541 (enum ice_flow_field)j);
542 if (status)
543 return status;
544 }
545
546
547 status = ice_flow_xtract_raws(hw, params, i);
548 if (status)
549 return status;
550 }
551
552 return status;
553}
554
555
556
557
558
559
560static enum ice_status
561ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
562{
563 enum ice_status status;
564
565 status = ice_flow_proc_seg_hdrs(params);
566 if (status)
567 return status;
568
569 status = ice_flow_create_xtrct_seq(hw, params);
570 if (status)
571 return status;
572
573 switch (params->blk) {
574 case ICE_BLK_FD:
575 case ICE_BLK_RSS:
576 status = 0;
577 break;
578 default:
579 return ICE_ERR_NOT_IMPL;
580 }
581
582 return status;
583}
584
/* Conditions for ice_flow_find_prof_conds(): also compare matched fields,
 * also require the VSI to be associated, or skip the direction check.
 */
#define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
#define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
#define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
588
589
590
591
592
593
594
595
596
597
598
/**
 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @vsi_handle: software VSI handle (checked with ICE_FLOW_FIND_PROF_CHK_VSI)
 * @conds: additional ICE_FLOW_FIND_PROF_* match conditions
 *
 * Returns the first profile on the block's list whose direction, segment
 * count/headers and (when requested) matched fields and VSI association
 * all agree with the arguments, or NULL. Holds the per-block profile list
 * lock while searching.
 */
static struct ice_flow_prof *
ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
			 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
			 u8 segs_cnt, u16 vsi_handle, u32 conds)
{
	struct ice_flow_prof *p, *prof = NULL;

	mutex_lock(&hw->fl_profs_locks[blk]);
	list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
		if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
		    segs_cnt && segs_cnt == p->segs_cnt) {
			u8 i;

			/* Check for profile-VSI association if specified */
			if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
			    ice_is_vsi_valid(hw, vsi_handle) &&
			    !test_bit(vsi_handle, p->vsis))
				continue;

			/* Protocol headers must be checked. Matched fields are
			 * checked if specified.
			 */
			for (i = 0; i < segs_cnt; i++)
				if (segs[i].hdrs != p->segs[i].hdrs ||
				    ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
				     segs[i].match != p->segs[i].match))
					break;

			/* A match is found if all segments are matched */
			if (i == segs_cnt) {
				prof = p;
				break;
			}
		}
	mutex_unlock(&hw->fl_profs_locks[blk]);

	return prof;
}
637
638
639
640
641
642
643
644static struct ice_flow_prof *
645ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
646{
647 struct ice_flow_prof *p;
648
649 list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
650 if (p->id == prof_id)
651 return p;
652
653 return NULL;
654}
655
656
657
658
659
660
661static void
662ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
663{
664 if (!entry)
665 return;
666
667 if (entry->entry)
668 devm_kfree(ice_hw_to_dev(hw), entry->entry);
669
670 devm_kfree(ice_hw_to_dev(hw), entry);
671}
672
673
674
675
676
677
678
679static enum ice_status
680ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
681 struct ice_flow_entry *entry)
682{
683 if (!entry)
684 return ICE_ERR_BAD_PTR;
685
686 list_del(&entry->l_entry);
687
688 ice_dealloc_flow_entry(hw, entry);
689
690 return 0;
691}
692
693
694
695
696
697
698
699
700
701
702
703
704
/**
 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @prof_id: unique ID to identify this flow profile
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @prof: stores the returned flow profile added
 *
 * Assumption: the caller has acquired the lock to the profile list.
 * On success *prof holds the new profile; on failure all allocated
 * memory is released via the goto-cleanup chain.
 */
static enum ice_status
ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       enum ice_flow_dir dir, u64 prof_id,
		       struct ice_flow_seg_info *segs, u8 segs_cnt,
		       struct ice_flow_prof **prof)
{
	struct ice_flow_prof_params *params;
	enum ice_status status;
	u8 i;

	if (!prof)
		return ICE_ERR_BAD_PTR;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return ICE_ERR_NO_MEMORY;

	params->prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params->prof),
				    GFP_KERNEL);
	if (!params->prof) {
		status = ICE_ERR_NO_MEMORY;
		goto free_params;
	}

	/* initialize extraction sequence to all invalid entries */
	for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
		params->es[i].prot_id = ICE_PROT_INVALID;
		params->es[i].off = ICE_FV_OFFSET_INVAL;
	}

	params->blk = blk;
	params->prof->id = prof_id;
	params->prof->dir = dir;
	params->prof->segs_cnt = segs_cnt;

	/* Make a copy of the segments that need to be persistent in the flow
	 * profile instance.
	 */
	for (i = 0; i < segs_cnt; i++)
		memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs));

	status = ice_flow_proc_segs(hw, params);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
		goto out;
	}

	/* Add a HW profile for this flow profile */
	status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
			      params->es);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
		goto out;
	}

	INIT_LIST_HEAD(&params->prof->entries);
	mutex_init(&params->prof->entries_lock);
	*prof = params->prof;

out:
	if (status)
		devm_kfree(ice_hw_to_dev(hw), params->prof);
free_params:
	kfree(params);

	return status;
}
772
773
774
775
776
777
778
779
780
/**
 * ice_flow_rem_prof_sync - remove a flow profile
 * @hw: pointer to the hardware structure
 * @blk: classification stage
 * @prof: pointer to flow profile to remove
 *
 * Assumption: the caller has acquired the lock to the profile list.
 */
static enum ice_status
ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       struct ice_flow_prof *prof)
{
	enum ice_status status;

	/* Remove all remaining flow entries before removing the profile */
	if (!list_empty(&prof->entries)) {
		struct ice_flow_entry *e, *t;

		mutex_lock(&prof->entries_lock);

		list_for_each_entry_safe(e, t, &prof->entries, l_entry) {
			status = ice_flow_rem_entry_sync(hw, blk, e);
			if (status)
				break;
		}

		mutex_unlock(&prof->entries_lock);
	}

	/* Remove the HW profile. NOTE(review): a failure from the entry
	 * removal loop above is overwritten here and not propagated —
	 * confirm this best-effort behavior is intentional.
	 */
	status = ice_rem_prof(hw, blk, prof->id);
	if (!status) {
		list_del(&prof->l_entry);
		mutex_destroy(&prof->entries_lock);
		devm_kfree(ice_hw_to_dev(hw), prof);
	}

	return status;
}
812
813
814
815
816
817
818
819
820
821
822
823static enum ice_status
824ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
825 struct ice_flow_prof *prof, u16 vsi_handle)
826{
827 enum ice_status status = 0;
828
829 if (!test_bit(vsi_handle, prof->vsis)) {
830 status = ice_add_prof_id_flow(hw, blk,
831 ice_get_hw_vsi_num(hw,
832 vsi_handle),
833 prof->id);
834 if (!status)
835 set_bit(vsi_handle, prof->vsis);
836 else
837 ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
838 status);
839 }
840
841 return status;
842}
843
844
845
846
847
848
849
850
851
852
853
854static enum ice_status
855ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
856 struct ice_flow_prof *prof, u16 vsi_handle)
857{
858 enum ice_status status = 0;
859
860 if (test_bit(vsi_handle, prof->vsis)) {
861 status = ice_rem_prof_id_flow(hw, blk,
862 ice_get_hw_vsi_num(hw,
863 vsi_handle),
864 prof->id);
865 if (!status)
866 clear_bit(vsi_handle, prof->vsis);
867 else
868 ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
869 status);
870 }
871
872 return status;
873}
874
875
876
877
878
879
880
881
882
883
884
885enum ice_status
886ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
887 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
888 struct ice_flow_prof **prof)
889{
890 enum ice_status status;
891
892 if (segs_cnt > ICE_FLOW_SEG_MAX)
893 return ICE_ERR_MAX_LIMIT;
894
895 if (!segs_cnt)
896 return ICE_ERR_PARAM;
897
898 if (!segs)
899 return ICE_ERR_BAD_PTR;
900
901 status = ice_flow_val_hdrs(segs, segs_cnt);
902 if (status)
903 return status;
904
905 mutex_lock(&hw->fl_profs_locks[blk]);
906
907 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
908 prof);
909 if (!status)
910 list_add(&(*prof)->l_entry, &hw->fl_profs[blk]);
911
912 mutex_unlock(&hw->fl_profs_locks[blk]);
913
914 return status;
915}
916
917
918
919
920
921
922
923enum ice_status
924ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
925{
926 struct ice_flow_prof *prof;
927 enum ice_status status;
928
929 mutex_lock(&hw->fl_profs_locks[blk]);
930
931 prof = ice_flow_find_prof_id(hw, blk, prof_id);
932 if (!prof) {
933 status = ICE_ERR_DOES_NOT_EXIST;
934 goto out;
935 }
936
937
938 status = ice_flow_rem_prof_sync(hw, blk, prof);
939
940out:
941 mutex_unlock(&hw->fl_profs_locks[blk]);
942
943 return status;
944}
945
946
947
948
949
950
951
952
953
954
955
956
/**
 * ice_flow_add_entry - Add a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @prof_id: ID of the profile to add a new flow entry to
 * @entry_id: unique ID to identify this flow entry
 * @vsi_handle: software VSI handle for the flow entry
 * @prio: priority of the flow entry
 * @data: pointer to a data buffer containing flow entry's match values/masks
 * @entry_h: pointer to buffer that receives the new flow entry's handle
 *
 * Looks up the profile, associates the VSI with it, allocates and links a
 * new entry, and returns its handle via @entry_h. On any failure after the
 * entry was allocated, the entry (and its data buffer, if set) is freed.
 */
enum ice_status
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
		   u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
		   void *data, u64 *entry_h)
{
	struct ice_flow_entry *e = NULL;
	struct ice_flow_prof *prof;
	enum ice_status status;

	/* No flow entry data is expected for RSS */
	if (!entry_h || (!data && blk != ICE_BLK_RSS))
		return ICE_ERR_BAD_PTR;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&hw->fl_profs_locks[blk]);

	prof = ice_flow_find_prof_id(hw, blk, prof_id);
	if (!prof) {
		status = ICE_ERR_DOES_NOT_EXIST;
	} else {
		/* Allocate memory for the entry being added and associate
		 * the VSI to the found flow profile.
		 */
		e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL);
		if (!e)
			status = ICE_ERR_NO_MEMORY;
		else
			status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	}

	mutex_unlock(&hw->fl_profs_locks[blk]);
	if (status)
		goto out;

	e->id = entry_id;
	e->vsi_handle = vsi_handle;
	e->prof = prof;
	e->priority = prio;

	switch (blk) {
	case ICE_BLK_FD:
	case ICE_BLK_RSS:
		break;
	default:
		status = ICE_ERR_NOT_IMPL;
		goto out;
	}

	/* NOTE(review): entries_lock is taken after fl_profs_locks was
	 * released above — presumably the profile cannot vanish while the
	 * VSI remains associated; confirm against ice_flow_rem_prof paths.
	 */
	mutex_lock(&prof->entries_lock);
	list_add(&e->l_entry, &prof->entries);
	mutex_unlock(&prof->entries_lock);

	*entry_h = ICE_FLOW_ENTRY_HNDL(e);

out:
	if (status && e) {
		if (e->entry)
			devm_kfree(ice_hw_to_dev(hw), e->entry);
		devm_kfree(ice_hw_to_dev(hw), e);
	}

	return status;
}
1022
1023
1024
1025
1026
1027
1028
1029enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
1030 u64 entry_h)
1031{
1032 struct ice_flow_entry *entry;
1033 struct ice_flow_prof *prof;
1034 enum ice_status status = 0;
1035
1036 if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
1037 return ICE_ERR_PARAM;
1038
1039 entry = ICE_FLOW_ENTRY_PTR(entry_h);
1040
1041
1042 prof = entry->prof;
1043
1044 if (prof) {
1045 mutex_lock(&prof->entries_lock);
1046 status = ice_flow_rem_entry_sync(hw, blk, entry);
1047 mutex_unlock(&prof->entries_lock);
1048 }
1049
1050 return status;
1051}
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076static void
1077ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
1078 enum ice_flow_fld_match_type field_type, u16 val_loc,
1079 u16 mask_loc, u16 last_loc)
1080{
1081 u64 bit = BIT_ULL(fld);
1082
1083 seg->match |= bit;
1084 if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
1085 seg->range |= bit;
1086
1087 seg->fields[fld].type = field_type;
1088 seg->fields[fld].src.val = val_loc;
1089 seg->fields[fld].src.mask = mask_loc;
1090 seg->fields[fld].src.last = last_loc;
1091
1092 ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
1093}
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115void
1116ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
1117 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
1118{
1119 enum ice_flow_fld_match_type t = range ?
1120 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
1121
1122 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
1123}
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
/**
 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
 * @seg: packet segment the field being set belongs to
 * @off: offset of the raw field from the beginning of the segment in bytes
 * @len: length of the raw pattern to be matched
 * @val_loc: location of the value to match from entry's input buffer
 * @mask_loc: location of mask value from entry's input buffer
 */
void
ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
		     u16 val_loc, u16 mask_loc)
{
	if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
		seg->raws[seg->raws_cnt].off = off;
		seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
		seg->raws[seg->raws_cnt].info.src.val = val_loc;
		seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
		/* The "last" field is used to store the length of the field */
		seg->raws[seg->raws_cnt].info.src.last = len;
	}

	/* Overflows of "raws" will be handled as an error condition later in
	 * the flow: the count is incremented unconditionally so that
	 * ice_flow_xtract_raws() can detect raws_cnt > ICE_FLOW_SEG_RAW_FLD_MAX
	 * and return ICE_ERR_MAX_LIMIT.
	 */
	seg->raws_cnt++;
}
1160
/* Header flags permitted in an RSS segment, by layer */
#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)

#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)

/* All header flags an RSS segment may legally carry */
#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
	(ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
	 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181static enum ice_status
1182ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
1183 u32 flow_hdr)
1184{
1185 u64 val;
1186 u8 i;
1187
1188 for_each_set_bit(i, (unsigned long *)&hash_fields,
1189 ICE_FLOW_FIELD_IDX_MAX)
1190 ice_flow_set_fld(segs, (enum ice_flow_field)i,
1191 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1192 ICE_FLOW_FLD_OFF_INVAL, false);
1193
1194 ICE_FLOW_SET_HDRS(segs, flow_hdr);
1195
1196 if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS)
1197 return ICE_ERR_PARAM;
1198
1199 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
1200 if (val && !is_power_of_2(val))
1201 return ICE_ERR_CFG;
1202
1203 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
1204 if (val && !is_power_of_2(val))
1205 return ICE_ERR_CFG;
1206
1207 return 0;
1208}
1209
1210
1211
1212
1213
1214
1215
1216
1217void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
1218{
1219 struct ice_rss_cfg *r, *tmp;
1220
1221 if (list_empty(&hw->rss_list_head))
1222 return;
1223
1224 mutex_lock(&hw->rss_locks);
1225 list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
1226 if (test_and_clear_bit(vsi_handle, r->vsis))
1227 if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
1228 list_del(&r->l_entry);
1229 devm_kfree(ice_hw_to_dev(hw), r);
1230 }
1231 mutex_unlock(&hw->rss_locks);
1232}
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
/**
 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 *
 * Iterates through all RSS flow profiles, disassociates the VSI from each
 * one, and removes any profile left with no associated VSIs. Stops on the
 * first error.
 */
enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *p, *t;
	enum ice_status status = 0;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* NOTE(review): list_empty() is read without rss_locks held —
	 * presumably an intentional lock-free fast path; confirm no
	 * concurrent writer can make this racy in a harmful way.
	 */
	if (list_empty(&hw->fl_profs[blk]))
		return 0;

	mutex_lock(&hw->rss_locks);
	list_for_each_entry_safe(p, t, &hw->fl_profs[blk], l_entry)
		if (test_bit(vsi_handle, p->vsis)) {
			status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
			if (status)
				break;

			/* remove the profile once no VSI uses it */
			if (bitmap_empty(p->vsis, ICE_MAX_VSI)) {
				status = ice_flow_rem_prof(hw, blk, p->id);
				if (status)
					break;
			}
		}
	mutex_unlock(&hw->rss_locks);

	return status;
}
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281static void
1282ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
1283{
1284 struct ice_rss_cfg *r, *tmp;
1285
1286
1287
1288
1289
1290 list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
1291 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
1292 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
1293 clear_bit(vsi_handle, r->vsis);
1294 if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
1295 list_del(&r->l_entry);
1296 devm_kfree(ice_hw_to_dev(hw), r);
1297 }
1298 return;
1299 }
1300}
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310static enum ice_status
1311ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
1312{
1313 struct ice_rss_cfg *r, *rss_cfg;
1314
1315 list_for_each_entry(r, &hw->rss_list_head, l_entry)
1316 if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
1317 r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
1318 set_bit(vsi_handle, r->vsis);
1319 return 0;
1320 }
1321
1322 rss_cfg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rss_cfg),
1323 GFP_KERNEL);
1324 if (!rss_cfg)
1325 return ICE_ERR_NO_MEMORY;
1326
1327 rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
1328 rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
1329 set_bit(vsi_handle, rss_cfg->vsis);
1330
1331 list_add_tail(&rss_cfg->l_entry, &hw->rss_list_head);
1332
1333 return 0;
1334}
1335
/* Layout of the 64-bit RSS profile ID: hashed-fields bits [31:0], packet
 * header bits [61:32], and an encapsulation flag in bit 63.
 */
#define ICE_FLOW_PROF_HASH_S 0
#define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
#define ICE_FLOW_PROF_HDR_S 32
#define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
#define ICE_FLOW_PROF_ENCAP_S 63
#define ICE_FLOW_PROF_ENCAP_M (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))

/* Segment counts used to select outer-only vs outer+inner RSS hashing */
#define ICE_RSS_OUTER_HEADERS 1
#define ICE_RSS_INNER_HEADERS 2

/* Composes a unique RSS profile ID from the hashed fields, the packet
 * headers, and whether more than one segment (encapsulation) is involved.
 */
#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
	(u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
	      (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
	      ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
/**
 * ice_add_rss_cfg_sync - add an RSS configuration along with flow profile
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
 * @addl_hdrs: protocol header fields
 * @segs_cnt: packet segment count
 *
 * Assumption: lock has already been acquired for RSS list.
 */
static enum ice_status
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
		     u32 addl_hdrs, u8 segs_cnt)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *prof = NULL;
	struct ice_flow_seg_info *segs;
	enum ice_status status;

	if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
		return ICE_ERR_PARAM;

	segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
	if (!segs)
		return ICE_ERR_NO_MEMORY;

	/* Construct the packet segment info from the hashed fields */
	status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
					   addl_hdrs);
	if (status)
		goto exit;

	/* Search for a flow profile that has matching headers, hash fields
	 * and has the input VSI associated to it. If found, no further
	 * operations required and exit.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS |
					ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof)
		goto exit;

	/* Check if a flow profile exists with the same protocol headers and
	 * associated with the input VSI. If so disassociate the VSI from
	 * this profile. The VSI will be added to a new profile created with
	 * the protocol header and new hash field configuration.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof) {
		status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
		if (!status)
			ice_rem_rss_list(hw, vsi_handle, prof);
		else
			goto exit;

		/* Remove profile if it has no VSIs associated */
		if (bitmap_empty(prof->vsis, ICE_MAX_VSI)) {
			status = ice_flow_rem_prof(hw, blk, prof->id);
			if (status)
				goto exit;
		}
	}

	/* Search for a profile that has same match fields only. If this
	 * exists then associate the VSI to this profile.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS);
	if (prof) {
		status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
		if (!status)
			status = ice_add_rss_list(hw, vsi_handle, prof);
		goto exit;
	}

	/* Create a new flow profile with generated profile and packet
	 * segment information.
	 */
	status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
				   ICE_FLOW_GEN_PROFID(hashed_flds,
						       segs[segs_cnt - 1].hdrs,
						       segs_cnt),
				   segs, segs_cnt, &prof);
	if (status)
		goto exit;

	status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	/* If association to a new flow profile failed then this profile can
	 * be removed.
	 */
	if (status) {
		ice_flow_rem_prof(hw, blk, prof->id);
		goto exit;
	}

	status = ice_add_rss_list(hw, vsi_handle, prof);

exit:
	kfree(segs);
	return status;
}
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472enum ice_status
1473ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
1474 u32 addl_hdrs)
1475{
1476 enum ice_status status;
1477
1478 if (hashed_flds == ICE_HASH_INVALID ||
1479 !ice_is_vsi_valid(hw, vsi_handle))
1480 return ICE_ERR_PARAM;
1481
1482 mutex_lock(&hw->rss_locks);
1483 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
1484 ICE_RSS_OUTER_HEADERS);
1485 if (!status)
1486 status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
1487 addl_hdrs, ICE_RSS_INNER_HEADERS);
1488 mutex_unlock(&hw->rss_locks);
1489
1490 return status;
1491}
1492
1493
1494
1495
1496
/* Groups of AVF (virtual function interface) hash bits. Each group collects
 * the ICE_AVF_FLOW_FIELD_* bits that translate to one native RSS hash
 * configuration in ice_add_avf_rss_cfg().
 */

/* IPv4 address-only hashing (plain and fragmented IPv4) */
#define ICE_FLOW_AVF_RSS_IPV4_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4))
/* TCP-over-IPv4 hashing (with and without the SYN-no-ACK variant) */
#define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
/* UDP-over-IPv4 hashing (unicast, multicast, and generic UDP) */
#define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP))
/* Union of every supported IPv4 AVF hash bit (incl. SCTP) */
#define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \
	(ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \
	 ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP))

/* IPv6 address-only hashing (plain and fragmented IPv6) */
#define ICE_FLOW_AVF_RSS_IPV6_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6))
/* UDP-over-IPv6 hashing (unicast, multicast, and generic UDP) */
#define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP))
/* TCP-over-IPv6 hashing (with and without the SYN-no-ACK variant) */
#define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP))
/* Union of every supported IPv6 AVF hash bit (incl. SCTP) */
#define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \
	(ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \
	 ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP))
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535enum ice_status
1536ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
1537{
1538 enum ice_status status = 0;
1539 u64 hash_flds;
1540
1541 if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
1542 !ice_is_vsi_valid(hw, vsi_handle))
1543 return ICE_ERR_PARAM;
1544
1545
1546 if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
1547 ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
1548 return ICE_ERR_CFG;
1549
1550 hash_flds = avf_hash;
1551
1552
1553 if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS)
1554 hash_flds |= ICE_FLOW_AVF_RSS_IPV4_MASKS;
1555
1556 if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)
1557 hash_flds |= ICE_FLOW_AVF_RSS_IPV6_MASKS;
1558
1559
1560 while (hash_flds) {
1561 u64 rss_hash = ICE_HASH_INVALID;
1562
1563 if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS) {
1564 if (hash_flds & ICE_FLOW_AVF_RSS_IPV4_MASKS) {
1565 rss_hash = ICE_FLOW_HASH_IPV4;
1566 hash_flds &= ~ICE_FLOW_AVF_RSS_IPV4_MASKS;
1567 } else if (hash_flds &
1568 ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS) {
1569 rss_hash = ICE_FLOW_HASH_IPV4 |
1570 ICE_FLOW_HASH_TCP_PORT;
1571 hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS;
1572 } else if (hash_flds &
1573 ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS) {
1574 rss_hash = ICE_FLOW_HASH_IPV4 |
1575 ICE_FLOW_HASH_UDP_PORT;
1576 hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS;
1577 } else if (hash_flds &
1578 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) {
1579 rss_hash = ICE_FLOW_HASH_IPV4 |
1580 ICE_FLOW_HASH_SCTP_PORT;
1581 hash_flds &=
1582 ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP);
1583 }
1584 } else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) {
1585 if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) {
1586 rss_hash = ICE_FLOW_HASH_IPV6;
1587 hash_flds &= ~ICE_FLOW_AVF_RSS_IPV6_MASKS;
1588 } else if (hash_flds &
1589 ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS) {
1590 rss_hash = ICE_FLOW_HASH_IPV6 |
1591 ICE_FLOW_HASH_TCP_PORT;
1592 hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS;
1593 } else if (hash_flds &
1594 ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS) {
1595 rss_hash = ICE_FLOW_HASH_IPV6 |
1596 ICE_FLOW_HASH_UDP_PORT;
1597 hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS;
1598 } else if (hash_flds &
1599 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) {
1600 rss_hash = ICE_FLOW_HASH_IPV6 |
1601 ICE_FLOW_HASH_SCTP_PORT;
1602 hash_flds &=
1603 ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP);
1604 }
1605 }
1606
1607 if (rss_hash == ICE_HASH_INVALID)
1608 return ICE_ERR_OUT_OF_RANGE;
1609
1610 status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
1611 ICE_FLOW_SEG_HDR_NONE);
1612 if (status)
1613 break;
1614 }
1615
1616 return status;
1617}
1618
1619
1620
1621
1622
1623
1624enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
1625{
1626 enum ice_status status = 0;
1627 struct ice_rss_cfg *r;
1628
1629 if (!ice_is_vsi_valid(hw, vsi_handle))
1630 return ICE_ERR_PARAM;
1631
1632 mutex_lock(&hw->rss_locks);
1633 list_for_each_entry(r, &hw->rss_list_head, l_entry) {
1634 if (test_bit(vsi_handle, r->vsis)) {
1635 status = ice_add_rss_cfg_sync(hw, vsi_handle,
1636 r->hashed_flds,
1637 r->packet_hdr,
1638 ICE_RSS_OUTER_HEADERS);
1639 if (status)
1640 break;
1641 status = ice_add_rss_cfg_sync(hw, vsi_handle,
1642 r->hashed_flds,
1643 r->packet_hdr,
1644 ICE_RSS_INNER_HEADERS);
1645 if (status)
1646 break;
1647 }
1648 }
1649 mutex_unlock(&hw->rss_locks);
1650
1651 return status;
1652}
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
1664{
1665 u64 rss_hash = ICE_HASH_INVALID;
1666 struct ice_rss_cfg *r;
1667
1668
1669 if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
1670 return ICE_HASH_INVALID;
1671
1672 mutex_lock(&hw->rss_locks);
1673 list_for_each_entry(r, &hw->rss_list_head, l_entry)
1674 if (test_bit(vsi_handle, r->vsis) &&
1675 r->packet_hdr == hdrs) {
1676 rss_hash = r->hashed_flds;
1677 break;
1678 }
1679 mutex_unlock(&hw->rss_locks);
1680
1681 return rss_hash;
1682}
1683