#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"

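/* Translate the tc action list of a flower rule into mlxsw ACL rule
 * actions. A counter action is appended first so that the rule's
 * statistics can later be reported back to tc.
 */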
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_block *block,
					 struct mlxsw_sp_acl_rule_info *rulei,
					 struct tcf_exts *exts,
					 struct netlink_ext_ack *extack)
{
	const struct tc_action *a;
	int err, i;

	if (!tcf_exts_has_actions(exts))
		return 0;
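	/* Count action is inserted first */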
	err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
	if (err)
		return err;

	tcf_exts_for_each_action(i, a, exts) {
		if (is_tcf_gact_ok(a)) {
			err = mlxsw_sp_acl_rulei_act_terminate(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
				return err;
			}
		} else if (is_tcf_gact_shot(a)) {
			err = mlxsw_sp_acl_rulei_act_drop(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
				return err;
			}
		} else if (is_tcf_gact_trap(a)) {
			err = mlxsw_sp_acl_rulei_act_trap(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
				return err;
			}
		} else if (is_tcf_gact_goto_chain(a)) {
			u32 chain_index = tcf_gact_goto_chain_index(a);
			struct mlxsw_sp_acl_ruleset *ruleset;
			u16 group_id;

			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
							      chain_index,
							      MLXSW_SP_ACL_PROFILE_FLOWER);
			if (IS_ERR(ruleset))
				return PTR_ERR(ruleset);

			group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
			err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
				return err;
			}
		} else if (is_tcf_mirred_egress_redirect(a)) {
			struct net_device *out_dev;
			struct mlxsw_sp_fid *fid;
			u16 fid_index;

			fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
			fid_index = mlxsw_sp_fid_index(fid);
			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
							     fid_index, extack);
			if (err)
				return err;

			out_dev = tcf_mirred_dev(a);
			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
							 out_dev, extack);
			if (err)
				return err;
		} else if (is_tcf_mirred_egress_mirror(a)) {
			struct net_device *out_dev = tcf_mirred_dev(a);

			err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
							    block, out_dev,
							    extack);
			if (err)
				return err;
		} else if (is_tcf_vlan(a)) {
			u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
			u32 action = tcf_vlan_action(a);
			u8 prio = tcf_vlan_push_prio(a);
			u16 vid = tcf_vlan_push_vid(a);

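			/* The vlan action is appended via a direct return,
			 * so any actions following it in the list are not
			 * parsed.
			 */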
			return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
							   action, vid,
							   proto, prio, extack);
		} else {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

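/* Encode the IPv4 source and destination addresses and their masks. */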
static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f)
{
	struct flow_dissector_key_ipv4_addrs *key =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
					  f->key);
	struct flow_dissector_key_ipv4_addrs *mask =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
					  f->mask);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       (char *) &key->src,
				       (char *) &mask->src, 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       (char *) &key->dst,
				       (char *) &mask->dst, 4);
}

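/* Encode the IPv6 source and destination addresses and their masks. */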
static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f)
{
	struct flow_dissector_key_ipv6_addrs *key =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
					  f->key);
	struct flow_dissector_key_ipv6_addrs *mask =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
					  f->mask);

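	/* Each 128-bit address is programmed as four 32-bit chunks,
	 * most significant bits first.
	 */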
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
				       &key->src.s6_addr[0x0],
				       &mask->src.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
				       &key->src.s6_addr[0x4],
				       &mask->src.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
				       &key->src.s6_addr[0x8],
				       &mask->src.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       &key->src.s6_addr[0xC],
				       &mask->src.s6_addr[0xC], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
				       &key->dst.s6_addr[0x0],
				       &mask->dst.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
				       &key->dst.s6_addr[0x4],
				       &mask->dst.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
				       &key->dst.s6_addr[0x8],
				       &mask->dst.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       &key->dst.s6_addr[0xC],
				       &mask->dst.s6_addr[0xC], 4);
}

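/* Encode the L4 port keys. Port keys are accepted only together with a
 * TCP or UDP protocol match.
 */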
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f,
				       u8 ip_proto)
{
	struct flow_dissector_key_ports *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_PORTS,
					f->key);
	mask = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_PORTS,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(key->dst), ntohs(mask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(key->src), ntohs(mask->src));
	return 0;
}

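/* Encode the TCP flags key; only valid together with a TCP protocol
 * match.
 */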
static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
				     struct tc_cls_flower_offload *f,
				     u8 ip_proto)
{
	struct flow_dissector_key_tcp *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP))
		return 0;

	if (ip_proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_TCP,
					f->key);
	mask = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_TCP,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
				       ntohs(key->flags), ntohs(mask->flags));
	return 0;
}

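/* Encode the IP TTL and TOS keys; only valid for IPv4/IPv6 ethertypes. */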
static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct tc_cls_flower_offload *f,
				    u16 n_proto)
{
	struct flow_dissector_key_ip *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP))
		return 0;

	if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
		dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_IP,
					f->key);
	mask = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_IP,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
				       key->ttl, mask->ttl);
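	/* ECN occupies the two low-order bits of the TOS byte, DSCP the
	 * six high-order bits.
	 */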
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
				       key->tos & 0x3, mask->tos & 0x3);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
				       key->tos >> 2, mask->tos >> 2);

	return 0;
}

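/* Translate a flower match into an mlxsw ACL rule: reject dissector keys
 * that cannot be offloaded, then encode each supported key and finally
 * parse the attached actions.
 */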
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct tc_cls_flower_offload *f)
{
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);
		addr_type = key->addr_type;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		n_proto_key = ntohs(key->n_proto);
		n_proto_mask = ntohs(mask->n_proto);

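		/* ETH_P_ALL is a wildcard, so clear the key and mask
		 * rather than matching on the ethertype.
		 */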
		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		ip_proto = key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       key->ip_proto, mask->ip_proto);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

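		/* Each 48-bit MAC is split across two key fields:
		 * the upper 16 bits and the lower 32 bits.
		 */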
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_32_47,
					       key->dst, mask->dst, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_0_31,
					       key->dst + 2, mask->dst + 2, 4);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_32_47,
					       key->src, mask->src, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_0_31,
					       key->src + 2, mask->src + 2, 4);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);

		if (mlxsw_sp_acl_block_is_egress_bound(block)) {
			NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
			return -EOPNOTSUPP;
		}
		if (mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       key->vlan_id,
						       mask->vlan_id);
		if (mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       key->vlan_priority,
						       mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f, n_proto_key & n_proto_mask);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts,
					     f->common.extack);
}

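/* Offload a flower rule: get a ruleset for the chain, create a rule,
 * parse the match and actions into it and commit it to the hardware.
 */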
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block,
			    struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie,
					f->common.extack);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
	if (err)
		goto err_flower_parse;

	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;

	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_rule_add;

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

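/* Unoffload a flower rule: remove it from the hardware and destroy it. */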
void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_block *block,
			     struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (rule) {
		mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
	}

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

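/* Query the hardware counters of an offloaded rule and report them back
 * to tc.
 */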
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_block *block,
			  struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (WARN_ON(IS_ERR(ruleset)))
		return -EINVAL;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule)
		return -EINVAL;

	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
					  &lastuse);
	if (err)
		goto err_rule_get_stats;

	tcf_exts_stats_update(f->exts, bytes, packets, lastuse);

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_get_stats:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

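/* Create a chain template: parse the match to learn which key elements
 * it uses and take a ruleset reference sized for that usage, held until
 * the template is destroyed.
 */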
int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info rulei;
	int err;

	memset(&rulei, 0, sizeof(rulei));
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f);
	if (err)
		return err;
	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER,
					   &rulei.values.elusage);
	return PTR_ERR_OR_ZERO(ruleset);
}

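/* Destroy a chain template by dropping the ruleset references. */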
void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_block *block,
				   struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;

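	/* put the reference to the ruleset kept in create */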
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}