#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_vlan.h>

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"

#define STATS_CHECK_PERIOD (HZ / 2)

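/* Map each tc pedit field to the offset and size of the corresponding
 * member in struct ch_filter_specification; offload_pedit() uses this
 * table to copy rewritten header values into the filter spec.
 */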
static struct ch_tc_pedit_fields pedits[] = {
	PEDIT_FIELDS(ETH_, DMAC_31_0, 4, dmac, 0),
	PEDIT_FIELDS(ETH_, DMAC_47_32, 2, dmac, 4),
	PEDIT_FIELDS(ETH_, SMAC_15_0, 2, smac, 0),
	PEDIT_FIELDS(ETH_, SMAC_47_16, 4, smac, 2),
	PEDIT_FIELDS(IP4_, SRC, 4, nat_fip, 0),
	PEDIT_FIELDS(IP4_, DST, 4, nat_lip, 0),
	PEDIT_FIELDS(IP6_, SRC_31_0, 4, nat_fip, 0),
	PEDIT_FIELDS(IP6_, SRC_63_32, 4, nat_fip, 4),
	PEDIT_FIELDS(IP6_, SRC_95_64, 4, nat_fip, 8),
	PEDIT_FIELDS(IP6_, SRC_127_96, 4, nat_fip, 12),
	PEDIT_FIELDS(IP6_, DST_31_0, 4, nat_lip, 0),
	PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4),
	PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8),
	PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
};

static struct ch_tc_flower_entry *allocate_flower_entry(void)
{
	struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);

	if (new)
		spin_lock_init(&new->lock);
	return new;
}

static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
						   unsigned long flower_cookie)
{
	return rhashtable_lookup_fast(&adap->flower_tbl, &flower_cookie,
				      adap->flower_ht_params);
}

static void cxgb4_process_flow_match(struct net_device *dev,
				     struct flow_rule *rule,
				     struct ch_filter_specification *fs)
{
	u16 addr_type = 0;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;
		u16 ethtype_key, ethtype_mask;

		flow_rule_match_basic(rule, &match);
		ethtype_key = ntohs(match.key->n_proto);
		ethtype_mask = ntohs(match.mask->n_proto);

		if (ethtype_key == ETH_P_ALL) {
			ethtype_key = 0;
			ethtype_mask = 0;
		}

		if (ethtype_key == ETH_P_IPV6)
			fs->type = 1;

		fs->val.ethtype = ethtype_key;
		fs->mask.ethtype = ethtype_mask;
		fs->val.proto = match.key->ip_proto;
		fs->mask.proto = match.mask->ip_proto;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		fs->type = 0;
		memcpy(&fs->val.lip[0], &match.key->dst, sizeof(match.key->dst));
		memcpy(&fs->val.fip[0], &match.key->src, sizeof(match.key->src));
		memcpy(&fs->mask.lip[0], &match.mask->dst, sizeof(match.mask->dst));
		memcpy(&fs->mask.fip[0], &match.mask->src, sizeof(match.mask->src));

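		/* Default the NAT addresses to the matched addresses; pedit
		 * actions, if present, overwrite them later.
		 */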
		memcpy(&fs->nat_lip[0], &match.key->dst, sizeof(match.key->dst));
		memcpy(&fs->nat_fip[0], &match.key->src, sizeof(match.key->src));
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		fs->type = 1;
		memcpy(&fs->val.lip[0], match.key->dst.s6_addr,
		       sizeof(match.key->dst));
		memcpy(&fs->val.fip[0], match.key->src.s6_addr,
		       sizeof(match.key->src));
		memcpy(&fs->mask.lip[0], match.mask->dst.s6_addr,
		       sizeof(match.mask->dst));
		memcpy(&fs->mask.fip[0], match.mask->src.s6_addr,
		       sizeof(match.mask->src));

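		/* As with IPv4, seed the NAT addresses with the matched
		 * addresses.
		 */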
		memcpy(&fs->nat_lip[0], match.key->dst.s6_addr,
		       sizeof(match.key->dst));
		memcpy(&fs->nat_fip[0], match.key->src.s6_addr,
		       sizeof(match.key->src));
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		fs->val.lport = be16_to_cpu(match.key->dst);
		fs->mask.lport = be16_to_cpu(match.mask->dst);
		fs->val.fport = be16_to_cpu(match.key->src);
		fs->mask.fport = be16_to_cpu(match.mask->src);

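		/* Seed the NAT ports with the matched ports; pedit actions
		 * may overwrite them later.
		 */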
		fs->nat_lport = fs->val.lport;
		fs->nat_fport = fs->val.fport;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		fs->val.tos = match.key->tos;
		fs->mask.tos = match.mask->tos;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match;

		flow_rule_match_enc_keyid(rule, &match);
		fs->val.vni = be32_to_cpu(match.key->keyid);
		fs->mask.vni = be32_to_cpu(match.mask->keyid);
		if (fs->mask.vni) {
			fs->val.encap_vld = 1;
			fs->mask.encap_vld = 1;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;
		u16 vlan_tci, vlan_tci_mask;

		flow_rule_match_vlan(rule, &match);
		vlan_tci = match.key->vlan_id | (match.key->vlan_priority <<
						 VLAN_PRIO_SHIFT);
		vlan_tci_mask = match.mask->vlan_id | (match.mask->vlan_priority <<
						       VLAN_PRIO_SHIFT);
		fs->val.ivlan = vlan_tci;
		fs->mask.ivlan = vlan_tci_mask;

		fs->val.ivlan_vld = 1;
		fs->mask.ivlan_vld = 1;

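		/* ivlan_vld already selects 802.1Q-tagged packets, so an
		 * explicit ETH_P_8021Q ethertype match would be redundant;
		 * clear it to avoid double-matching on the TPID.
		 */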
		if (fs->val.ethtype == ETH_P_8021Q) {
			fs->val.ethtype = 0;
			fs->mask.ethtype = 0;
		}
	}

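	/* Match only packets arriving on the ingress port this filter is
	 * attached to.
	 */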
	fs->val.iport = netdev2pinfo(dev)->port_id;
	fs->mask.iport = ~0;
}

static int cxgb4_validate_flow_match(struct net_device *dev,
				     struct flow_rule *rule)
{
	struct flow_dissector *dissector = rule->match.dissector;
	u16 ethtype_mask = 0;
	u16 ethtype_key = 0;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IP))) {
		netdev_warn(dev, "Unsupported key used: 0x%x\n",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ethtype_key = ntohs(match.key->n_proto);
		ethtype_mask = ntohs(match.mask->n_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		u16 eth_ip_type = ethtype_key & ethtype_mask;
		struct flow_match_ip match;

		if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) {
			netdev_err(dev, "IP Key supported only with IPv4/v6");
			return -EINVAL;
		}

		flow_rule_match_ip(rule, &match);
		if (match.mask->ttl) {
			netdev_warn(dev, "ttl match unsupported for offload");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
			  u8 field)
{
	u32 set_val = val & ~mask;
	u32 offset = 0;
	u8 size = 1;
	int i;

	for (i = 0; i < ARRAY_SIZE(pedits); i++) {
		if (pedits[i].field == field) {
			offset = pedits[i].offset;
			size = pedits[i].size;
			break;
		}
	}
	memcpy((u8 *)fs + offset, &set_val, size);
}

static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
				u32 mask, u32 offset, u8 htype)
{
	switch (htype) {
	case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
		switch (offset) {
		case PEDIT_ETH_DMAC_31_0:
			fs->newdmac = 1;
			offload_pedit(fs, val, mask, ETH_DMAC_31_0);
			break;
		case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
			if (~mask & PEDIT_ETH_DMAC_MASK)
				offload_pedit(fs, val, mask, ETH_DMAC_47_32);
			else
				offload_pedit(fs, val >> 16, mask >> 16,
					      ETH_SMAC_15_0);
			break;
		case PEDIT_ETH_SMAC_47_16:
			fs->newsmac = 1;
			offload_pedit(fs, val, mask, ETH_SMAC_47_16);
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
		switch (offset) {
		case PEDIT_IP4_SRC:
			offload_pedit(fs, val, mask, IP4_SRC);
			break;
		case PEDIT_IP4_DST:
			offload_pedit(fs, val, mask, IP4_DST);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
		switch (offset) {
		case PEDIT_IP6_SRC_31_0:
			offload_pedit(fs, val, mask, IP6_SRC_31_0);
			break;
		case PEDIT_IP6_SRC_63_32:
			offload_pedit(fs, val, mask, IP6_SRC_63_32);
			break;
		case PEDIT_IP6_SRC_95_64:
			offload_pedit(fs, val, mask, IP6_SRC_95_64);
			break;
		case PEDIT_IP6_SRC_127_96:
			offload_pedit(fs, val, mask, IP6_SRC_127_96);
			break;
		case PEDIT_IP6_DST_31_0:
			offload_pedit(fs, val, mask, IP6_DST_31_0);
			break;
		case PEDIT_IP6_DST_63_32:
			offload_pedit(fs, val, mask, IP6_DST_63_32);
			break;
		case PEDIT_IP6_DST_95_64:
			offload_pedit(fs, val, mask, IP6_DST_95_64);
			break;
		case PEDIT_IP6_DST_127_96:
			offload_pedit(fs, val, mask, IP6_DST_127_96);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		switch (offset) {
		case PEDIT_TCP_SPORT_DPORT:
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
				fs->nat_fport = val;
			else
				fs->nat_lport = val >> 16;
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
		switch (offset) {
		case PEDIT_UDP_SPORT_DPORT:
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
				fs->nat_fport = val;
			else
				fs->nat_lport = val >> 16;
		}
		fs->nat_mode = NAT_MODE_ALL;
	}
}

void cxgb4_process_flow_actions(struct net_device *in,
				struct flow_action *actions,
				struct ch_filter_specification *fs)
{
	struct flow_action_entry *act;
	int i;

	flow_action_for_each(i, act, actions) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			fs->action = FILTER_PASS;
			break;
		case FLOW_ACTION_DROP:
			fs->action = FILTER_DROP;
			break;
		case FLOW_ACTION_MIRRED:
		case FLOW_ACTION_REDIRECT: {
			struct net_device *out = act->dev;
			struct port_info *pi = netdev_priv(out);

			fs->action = FILTER_SWITCH;
			fs->eport = pi->port_id;
			}
			break;
		case FLOW_ACTION_VLAN_POP:
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_MANGLE: {
			u8 prio = act->vlan.prio;
			u16 vid = act->vlan.vid;
			u16 vlan_tci = (prio << VLAN_PRIO_SHIFT) | vid;

			switch (act->id) {
			case FLOW_ACTION_VLAN_POP:
				fs->newvlan |= VLAN_REMOVE;
				break;
			case FLOW_ACTION_VLAN_PUSH:
				fs->newvlan |= VLAN_INSERT;
				fs->vlan = vlan_tci;
				break;
			case FLOW_ACTION_VLAN_MANGLE:
				fs->newvlan |= VLAN_REWRITE;
				fs->vlan = vlan_tci;
				break;
			default:
				break;
			}
			}
			break;
		case FLOW_ACTION_MANGLE: {
			u32 mask, val, offset;
			u8 htype;

			htype = act->mangle.htype;
			mask = act->mangle.mask;
			val = act->mangle.val;
			offset = act->mangle.offset;

			process_pedit_field(fs, val, mask, offset, htype);
			}
			break;
		case FLOW_ACTION_QUEUE:
			fs->action = FILTER_PASS;
			fs->dirsteer = 1;
			fs->iq = act->queue.index;
			break;
		default:
			break;
		}
	}
}

static bool valid_l4_mask(u32 mask)
{
	u16 hi, lo;

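	/* A single 32-bit pedit word spans both L4 ports (16 bits each).
	 * Only one of the two halves may be rewritten per action; reject
	 * masks that touch both.
	 */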
	hi = (mask >> 16) & 0xFFFF;
	lo = mask & 0xFFFF;

	return hi && lo ? false : true;
}

static bool valid_pedit_action(struct net_device *dev,
			       const struct flow_action_entry *act)
{
	u32 mask, offset;
	u8 htype;

	htype = act->mangle.htype;
	mask = act->mangle.mask;
	offset = act->mangle.offset;

	switch (htype) {
	case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
		switch (offset) {
		case PEDIT_ETH_DMAC_31_0:
		case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
		case PEDIT_ETH_SMAC_47_16:
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
		switch (offset) {
		case PEDIT_IP4_SRC:
		case PEDIT_IP4_DST:
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
		switch (offset) {
		case PEDIT_IP6_SRC_31_0:
		case PEDIT_IP6_SRC_63_32:
		case PEDIT_IP6_SRC_95_64:
		case PEDIT_IP6_SRC_127_96:
		case PEDIT_IP6_DST_31_0:
		case PEDIT_IP6_DST_63_32:
		case PEDIT_IP6_DST_95_64:
		case PEDIT_IP6_DST_127_96:
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		switch (offset) {
		case PEDIT_TCP_SPORT_DPORT:
			if (!valid_l4_mask(~mask)) {
				netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
					   __func__);
				return false;
			}
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
		switch (offset) {
		case PEDIT_UDP_SPORT_DPORT:
			if (!valid_l4_mask(~mask)) {
				netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
					   __func__);
				return false;
			}
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	default:
		netdev_err(dev, "%s: Unsupported pedit type\n", __func__);
		return false;
	}
	return true;
}

int cxgb4_validate_flow_actions(struct net_device *dev,
				struct flow_action *actions,
				struct netlink_ext_ack *extack,
				u8 matchall_filter)
{
	struct flow_action_entry *act;
	bool act_redir = false;
	bool act_pedit = false;
	bool act_vlan = false;
	int i;

	if (!flow_action_basic_hw_stats_check(actions, extack))
		return -EOPNOTSUPP;

	flow_action_for_each(i, act, actions) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
		case FLOW_ACTION_DROP:
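			/* Accept/drop are always supported; nothing to
			 * validate.
			 */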
			break;
		case FLOW_ACTION_MIRRED:
		case FLOW_ACTION_REDIRECT: {
			struct adapter *adap = netdev2adap(dev);
			struct net_device *n_dev, *target_dev;
			bool found = false;
			unsigned int i;

			if (act->id == FLOW_ACTION_MIRRED &&
			    !matchall_filter) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Egress mirror action is only supported for tc-matchall");
				return -EOPNOTSUPP;
			}

			target_dev = act->dev;
			for_each_port(adap, i) {
				n_dev = adap->port[i];
				if (target_dev == n_dev) {
					found = true;
					break;
				}
			}

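			/* The redirect/mirror target must be one of this
			 * adapter's own ports.
			 */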
			if (!found) {
				netdev_err(dev, "%s: Out port invalid\n",
					   __func__);
				return -EINVAL;
			}
			act_redir = true;
			}
			break;
		case FLOW_ACTION_VLAN_POP:
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_MANGLE: {
			u16 proto = be16_to_cpu(act->vlan.proto);

			switch (act->id) {
			case FLOW_ACTION_VLAN_POP:
				break;
			case FLOW_ACTION_VLAN_PUSH:
			case FLOW_ACTION_VLAN_MANGLE:
				if (proto != ETH_P_8021Q) {
					netdev_err(dev, "%s: Unsupported vlan proto\n",
						   __func__);
					return -EOPNOTSUPP;
				}
				break;
			default:
				netdev_err(dev, "%s: Unsupported vlan action\n",
					   __func__);
				return -EOPNOTSUPP;
			}
			act_vlan = true;
			}
			break;
		case FLOW_ACTION_MANGLE: {
			bool pedit_valid = valid_pedit_action(dev, act);

			if (!pedit_valid)
				return -EOPNOTSUPP;
			act_pedit = true;
			}
			break;
		case FLOW_ACTION_QUEUE:
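			/* Queue steering is always supported; nothing to
			 * validate.
			 */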
			break;
		default:
			netdev_err(dev, "%s: Unsupported action\n", __func__);
			return -EOPNOTSUPP;
		}
	}

	if ((act_pedit || act_vlan) && !act_redir) {
		netdev_err(dev, "%s: pedit/vlan rewrite invalid without egress redirect\n",
			   __func__);
		return -EINVAL;
	}

	return 0;
}

static void cxgb4_tc_flower_hash_prio_add(struct adapter *adap, u32 tc_prio)
{
	spin_lock_bh(&adap->tids.ftid_lock);
	if (adap->tids.tc_hash_tids_max_prio < tc_prio)
		adap->tids.tc_hash_tids_max_prio = tc_prio;
	spin_unlock_bh(&adap->tids.ftid_lock);
}

static void cxgb4_tc_flower_hash_prio_del(struct adapter *adap, u32 tc_prio)
{
	struct tid_info *t = &adap->tids;
	struct ch_tc_flower_entry *fe;
	struct rhashtable_iter iter;
	u32 found = 0;

	spin_lock_bh(&t->ftid_lock);

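	/* Nothing to do unless the rule being removed holds the current
	 * maximum hash-filter priority.
	 */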
	if (t->tc_hash_tids_max_prio != tc_prio)
		goto out_unlock;

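	/* Walk the remaining hash-filter rules to recompute the maximum
	 * priority that is still in use.
	 */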
	rhashtable_walk_enter(&adap->flower_tbl, &iter);
	do {
		rhashtable_walk_start(&iter);

		fe = rhashtable_walk_next(&iter);
		while (!IS_ERR_OR_NULL(fe)) {
			if (fe->fs.hash &&
			    fe->fs.tc_prio <= t->tc_hash_tids_max_prio) {
				t->tc_hash_tids_max_prio = fe->fs.tc_prio;
				found++;

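				/* Another rule still uses the same priority
				 * as the one being removed; the maximum is
				 * unchanged, so stop searching.
				 */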
				if (fe->fs.tc_prio == tc_prio)
					break;
			}

			fe = rhashtable_walk_next(&iter);
		}

		rhashtable_walk_stop(&iter);
	} while (fe == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);

	if (!found)
		t->tc_hash_tids_max_prio = 0;

out_unlock:
	spin_unlock_bh(&t->ftid_lock);
}

int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule,
			    u32 tc_prio, struct netlink_ext_ack *extack,
			    struct ch_filter_specification *fs, u32 *tid)
{
	struct adapter *adap = netdev2adap(dev);
	struct filter_ctx ctx;
	u8 inet_family;
	int fidx, ret;

	if (cxgb4_validate_flow_actions(dev, &rule->action, extack, 0))
		return -EOPNOTSUPP;

	if (cxgb4_validate_flow_match(dev, rule))
		return -EOPNOTSUPP;

	cxgb4_process_flow_match(dev, rule, fs);
	cxgb4_process_flow_actions(dev, &rule->action, fs);

	fs->hash = is_filter_exact_match(adap, fs);
	inet_family = fs->type ? PF_INET6 : PF_INET;

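	/* Reserve a free filter index (TID) based on the address family,
	 * filter type (hash or TCAM) and tc priority of the rule.
	 */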
	fidx = cxgb4_get_free_ftid(dev, inet_family, fs->hash,
				   tc_prio);
	if (fidx < 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "No free LETCAM index available");
		return -ENOMEM;
	}

	if (fidx < adap->tids.nhpftids) {
		fs->prio = 1;
		fs->hash = 0;
	}

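	/* Hash filters are not inserted by TCAM index; clear the index when
	 * the rule goes to the hash region.
	 */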
	if (fs->hash)
		fidx = 0;

	fs->tc_prio = tc_prio;

	init_completion(&ctx.completion);
	ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
	if (ret) {
		netdev_err(dev, "%s: filter creation err %d\n",
			   __func__, ret);
		return ret;
	}

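	/* Wait for the hardware to reply to the filter request. */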
	ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
	if (!ret)
		return -ETIMEDOUT;

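	/* Check whether the hardware rejected the filter. */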
	if (ctx.result)
		return ctx.result;

	*tid = ctx.tid;

	if (fs->hash)
		cxgb4_tc_flower_hash_prio_add(adap, tc_prio);

	return 0;
}

int cxgb4_tc_flower_replace(struct net_device *dev,
			    struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	struct ch_filter_specification *fs;
	int ret;

	ch_flower = allocate_flower_entry();
	if (!ch_flower) {
		netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__);
		return -ENOMEM;
	}

	fs = &ch_flower->fs;
	fs->hitcnts = 1;
	fs->tc_cookie = cls->cookie;

	ret = cxgb4_flow_rule_replace(dev, rule, cls->common.prio, extack, fs,
				      &ch_flower->filter_id);
	if (ret)
		goto free_entry;

	ch_flower->tc_flower_cookie = cls->cookie;
	ret = rhashtable_insert_fast(&adap->flower_tbl, &ch_flower->node,
				     adap->flower_ht_params);
	if (ret)
		goto del_filter;

	return 0;

del_filter:
	if (fs->hash)
		cxgb4_tc_flower_hash_prio_del(adap, cls->common.prio);

	cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);

free_entry:
	kfree(ch_flower);
	return ret;
}

int cxgb4_flow_rule_destroy(struct net_device *dev, u32 tc_prio,
			    struct ch_filter_specification *fs, int tid)
{
	struct adapter *adap = netdev2adap(dev);
	u8 hash;
	int ret;

	hash = fs->hash;

	ret = cxgb4_del_filter(dev, tid, fs);
	if (ret)
		return ret;

	if (hash)
		cxgb4_tc_flower_hash_prio_del(adap, tc_prio);

	return ret;
}

int cxgb4_tc_flower_destroy(struct net_device *dev,
			    struct flow_cls_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower)
		return -ENOENT;

	ret = cxgb4_flow_rule_destroy(dev, ch_flower->fs.tc_prio,
				      &ch_flower->fs, ch_flower->filter_id);
	if (ret)
		goto err;

	ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
				     adap->flower_ht_params);
	if (ret) {
		netdev_err(dev, "Flow remove from rhashtable failed");
		goto err;
	}
	kfree_rcu(ch_flower, rcu);

err:
	return ret;
}

static void ch_flower_stats_handler(struct work_struct *work)
{
	struct adapter *adap = container_of(work, struct adapter,
					    flower_stats_work);
	struct ch_tc_flower_entry *flower_entry;
	struct ch_tc_flower_stats *ofld_stats;
	struct rhashtable_iter iter;
	u64 packets;
	u64 bytes;
	int ret;

	rhashtable_walk_enter(&adap->flower_tbl, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((flower_entry = rhashtable_walk_next(&iter)) &&
		       !IS_ERR(flower_entry)) {
			ret = cxgb4_get_filter_counters(adap->port[0],
							flower_entry->filter_id,
							&packets, &bytes,
							flower_entry->fs.hash);
			if (!ret) {
				spin_lock(&flower_entry->lock);
				ofld_stats = &flower_entry->stats;

				if (ofld_stats->prev_packet_count != packets) {
					ofld_stats->prev_packet_count = packets;
					ofld_stats->last_used = jiffies;
				}
				spin_unlock(&flower_entry->lock);
			}
		}

		rhashtable_walk_stop(&iter);

	} while (flower_entry == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
}

static void ch_flower_stats_cb(struct timer_list *t)
{
	struct adapter *adap = from_timer(adap, t, flower_stats_timer);

	schedule_work(&adap->flower_stats_work);
}

int cxgb4_tc_flower_stats(struct net_device *dev,
			  struct flow_cls_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_stats *ofld_stats;
	struct ch_tc_flower_entry *ch_flower;
	u64 packets;
	u64 bytes;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower) {
		ret = -ENOENT;
		goto err;
	}

	ret = cxgb4_get_filter_counters(dev, ch_flower->filter_id,
					&packets, &bytes,
					ch_flower->fs.hash);
	if (ret < 0)
		goto err;

	spin_lock_bh(&ch_flower->lock);
	ofld_stats = &ch_flower->stats;
	if (ofld_stats->packet_count != packets) {
		if (ofld_stats->prev_packet_count != packets)
			ofld_stats->last_used = jiffies;
		flow_stats_update(&cls->stats, bytes - ofld_stats->byte_count,
				  packets - ofld_stats->packet_count, 0,
				  ofld_stats->last_used,
				  FLOW_ACTION_HW_STATS_IMMEDIATE);

		ofld_stats->packet_count = packets;
		ofld_stats->byte_count = bytes;
		ofld_stats->prev_packet_count = packets;
	}
	spin_unlock_bh(&ch_flower->lock);
	return 0;

err:
	return ret;
}

static const struct rhashtable_params cxgb4_tc_flower_ht_params = {
	.nelem_hint = 384,
	.head_offset = offsetof(struct ch_tc_flower_entry, node),
	.key_offset = offsetof(struct ch_tc_flower_entry, tc_flower_cookie),
	.key_len = sizeof(((struct ch_tc_flower_entry *)0)->tc_flower_cookie),
	.max_size = 524288,
	.min_size = 512,
	.automatic_shrinking = true
};

int cxgb4_init_tc_flower(struct adapter *adap)
{
	int ret;

	if (adap->tc_flower_initialized)
		return -EEXIST;

	adap->flower_ht_params = cxgb4_tc_flower_ht_params;
	ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params);
	if (ret)
		return ret;

	INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler);
	timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0);
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
	adap->tc_flower_initialized = true;
	return 0;
}

void cxgb4_cleanup_tc_flower(struct adapter *adap)
{
	if (!adap->tc_flower_initialized)
		return;

	if (adap->flower_stats_timer.function)
		del_timer_sync(&adap->flower_stats_timer);
	cancel_work_sync(&adap->flower_stats_work);
	rhashtable_destroy(&adap->flower_tbl);
	adap->tc_flower_initialized = false;
}