#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"

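/* Walk the classifier's TC actions and translate each one (gact ok/drop/
 * trap/goto_chain, mirred egress redirect, vlan) into the corresponding
 * ACL rule action. Any other action fails the offload with -EOPNOTSUPP.
 */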
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
                                         struct net_device *dev, bool ingress,
                                         struct mlxsw_sp_acl_rule_info *rulei,
                                         struct tcf_exts *exts)
{
        const struct tc_action *a;
        LIST_HEAD(actions);
        int err;

        if (!tcf_exts_has_actions(exts))
                return 0;

        /* Count action is inserted first, so the rule's packet and byte
         * counters are updated even when a later action terminates
         * processing.
         */
        err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei);
        if (err)
                return err;

        tcf_exts_to_list(exts, &actions);
        list_for_each_entry(a, &actions, list) {
                if (is_tcf_gact_ok(a)) {
                        err = mlxsw_sp_acl_rulei_act_continue(rulei);
                        if (err)
                                return err;
                } else if (is_tcf_gact_shot(a)) {
                        err = mlxsw_sp_acl_rulei_act_drop(rulei);
                        if (err)
                                return err;
                } else if (is_tcf_gact_trap(a)) {
                        err = mlxsw_sp_acl_rulei_act_trap(rulei);
                        if (err)
                                return err;
                } else if (is_tcf_gact_goto_chain(a)) {
                        u32 chain_index = tcf_gact_goto_chain_index(a);
                        struct mlxsw_sp_acl_ruleset *ruleset;
                        u16 group_id;

                        ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, dev,
                                                              ingress,
                                                              chain_index,
                                                              MLXSW_SP_ACL_PROFILE_FLOWER);
                        if (IS_ERR(ruleset))
                                return PTR_ERR(ruleset);

                        group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
                        err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
                        if (err)
                                return err;
                } else if (is_tcf_mirred_egress_redirect(a)) {
                        int ifindex = tcf_mirred_ifindex(a);
                        struct net_device *out_dev;
                        struct mlxsw_sp_fid *fid;
                        u16 fid_index;

                        fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
                        fid_index = mlxsw_sp_fid_index(fid);
                        err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
                                                             fid_index);
                        if (err)
                                return err;

                        out_dev = __dev_get_by_index(dev_net(dev), ifindex);
                        if (out_dev == dev)
                                out_dev = NULL;

                        err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
                                                         out_dev);
                        if (err)
                                return err;
                } else if (is_tcf_vlan(a)) {
                        u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
                        u32 action = tcf_vlan_action(a);
                        u8 prio = tcf_vlan_push_prio(a);
                        u16 vid = tcf_vlan_push_vid(a);

                        return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
                                                           action, vid,
                                                           proto, prio);
                } else {
                        dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
                        return -EOPNOTSUPP;
                }
        }
        return 0;
}

static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
                                       struct tc_cls_flower_offload *f)
{
        struct flow_dissector_key_ipv4_addrs *key =
                skb_flow_dissector_target(f->dissector,
                                          FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                          f->key);
        struct flow_dissector_key_ipv4_addrs *mask =
                skb_flow_dissector_target(f->dissector,
                                          FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                          f->mask);

        mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_IP4,
                                       ntohl(key->src), ntohl(mask->src));
        mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_IP4,
                                       ntohl(key->dst), ntohl(mask->dst));
}

static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
                                       struct tc_cls_flower_offload *f)
{
        struct flow_dissector_key_ipv6_addrs *key =
                skb_flow_dissector_target(f->dissector,
                                          FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                          f->key);
        struct flow_dissector_key_ipv6_addrs *mask =
                skb_flow_dissector_target(f->dissector,
                                          FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                          f->mask);
        size_t addr_half_size = sizeof(key->src) / 2;

        /* Each 128-bit IPv6 address is keyed as two 64-bit halves, using
         * separate HI and LO flex-key elements.
         */
        mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_HI,
                                       &key->src.s6_addr[0],
                                       &mask->src.s6_addr[0],
                                       addr_half_size);
        mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_LO,
                                       &key->src.s6_addr[addr_half_size],
                                       &mask->src.s6_addr[addr_half_size],
                                       addr_half_size);
        mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_HI,
                                       &key->dst.s6_addr[0],
                                       &mask->dst.s6_addr[0],
                                       addr_half_size);
        mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_LO,
                                       &key->dst.s6_addr[addr_half_size],
                                       &mask->dst.s6_addr[addr_half_size],
                                       addr_half_size);
}

static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
                                       struct mlxsw_sp_acl_rule_info *rulei,
                                       struct tc_cls_flower_offload *f,
                                       u8 ip_proto)
{
        struct flow_dissector_key_ports *key, *mask;

        if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS))
                return 0;

        if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
                dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
                return -EINVAL;
        }

        key = skb_flow_dissector_target(f->dissector,
                                        FLOW_DISSECTOR_KEY_PORTS,
                                        f->key);
        mask = skb_flow_dissector_target(f->dissector,
                                         FLOW_DISSECTOR_KEY_PORTS,
                                         f->mask);
        mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
                                       ntohs(key->dst), ntohs(mask->dst));
        mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
                                       ntohs(key->src), ntohs(mask->src));
        return 0;
}

static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
                                     struct mlxsw_sp_acl_rule_info *rulei,
                                     struct tc_cls_flower_offload *f,
                                     u8 ip_proto)
{
        struct flow_dissector_key_tcp *key, *mask;

        if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP))
                return 0;

        if (ip_proto != IPPROTO_TCP) {
                dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
                return -EINVAL;
        }

        key = skb_flow_dissector_target(f->dissector,
                                        FLOW_DISSECTOR_KEY_TCP,
                                        f->key);
        mask = skb_flow_dissector_target(f->dissector,
                                         FLOW_DISSECTOR_KEY_TCP,
                                         f->mask);
        mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
                                       ntohs(key->flags), ntohs(mask->flags));
        return 0;
}

static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
                                    struct mlxsw_sp_acl_rule_info *rulei,
                                    struct tc_cls_flower_offload *f,
                                    u16 n_proto)
{
        struct flow_dissector_key_ip *key, *mask;

        if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP))
                return 0;

        if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
                dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
                return -EINVAL;
        }

        key = skb_flow_dissector_target(f->dissector,
                                        FLOW_DISSECTOR_KEY_IP,
                                        f->key);
        mask = skb_flow_dissector_target(f->dissector,
                                         FLOW_DISSECTOR_KEY_IP,
                                         f->mask);
        mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
                                       key->ttl, mask->ttl);

        /* ECN is the low two bits of the ToS byte. */
        mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
                                       key->tos & 0x3, mask->tos & 0x3);

        /* DSCP is the upper six bits of the ToS byte, so shift by two
         * (shifting by six would keep only the top two DSCP bits).
         */
        mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
                                       key->tos >> 2, mask->tos >> 2);

        return 0;
}

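/* Translate the flower match (dissector keys and masks) into flex-key
 * element key/mask pairs on the rule info, then translate the attached
 * actions.
 */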
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
                                 struct net_device *dev, bool ingress,
                                 struct mlxsw_sp_acl_rule_info *rulei,
                                 struct tc_cls_flower_offload *f)
{
        u16 n_proto_mask = 0;
        u16 n_proto_key = 0;
        u16 addr_type = 0;
        u8 ip_proto = 0;
        int err;

        if (f->dissector->used_keys &
            ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
              BIT(FLOW_DISSECTOR_KEY_BASIC) |
              BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_PORTS) |
              BIT(FLOW_DISSECTOR_KEY_TCP) |
              BIT(FLOW_DISSECTOR_KEY_IP) |
              BIT(FLOW_DISSECTOR_KEY_VLAN))) {
                dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
                return -EOPNOTSUPP;
        }

        mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_dissector_key_control *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_CONTROL,
                                                  f->key);
                addr_type = key->addr_type;
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_dissector_key_basic *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  f->key);
                struct flow_dissector_key_basic *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  f->mask);
                n_proto_key = ntohs(key->n_proto);
                n_proto_mask = ntohs(mask->n_proto);

                if (n_proto_key == ETH_P_ALL) {
                        n_proto_key = 0;
                        n_proto_mask = 0;
                }
                mlxsw_sp_acl_rulei_keymask_u32(rulei,
                                               MLXSW_AFK_ELEMENT_ETHERTYPE,
                                               n_proto_key, n_proto_mask);

                ip_proto = key->ip_proto;
                mlxsw_sp_acl_rulei_keymask_u32(rulei,
                                               MLXSW_AFK_ELEMENT_IP_PROTO,
                                               key->ip_proto, mask->ip_proto);
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
                struct flow_dissector_key_eth_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                  f->key);
                struct flow_dissector_key_eth_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                  f->mask);

                mlxsw_sp_acl_rulei_keymask_buf(rulei,
                                               MLXSW_AFK_ELEMENT_DMAC,
                                               key->dst, mask->dst,
                                               sizeof(key->dst));
                mlxsw_sp_acl_rulei_keymask_buf(rulei,
                                               MLXSW_AFK_ELEMENT_SMAC,
                                               key->src, mask->src,
                                               sizeof(key->src));
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_dissector_key_vlan *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_VLAN,
                                                  f->key);
                struct flow_dissector_key_vlan *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_VLAN,
                                                  f->mask);
                if (mask->vlan_id != 0)
                        mlxsw_sp_acl_rulei_keymask_u32(rulei,
                                                       MLXSW_AFK_ELEMENT_VID,
                                                       key->vlan_id,
                                                       mask->vlan_id);
                if (mask->vlan_priority != 0)
                        mlxsw_sp_acl_rulei_keymask_u32(rulei,
                                                       MLXSW_AFK_ELEMENT_PCP,
                                                       key->vlan_priority,
                                                       mask->vlan_priority);
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
                mlxsw_sp_flower_parse_ipv4(rulei, f);

        if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
                mlxsw_sp_flower_parse_ipv6(rulei, f);

        err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
        if (err)
                return err;
        err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
        if (err)
                return err;

        err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f,
                                       n_proto_key & n_proto_mask);
        if (err)
                return err;

        return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, ingress,
                                             rulei, f->exts);
}

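/* Offload a flower classifier: get the ruleset bound to this port,
 * direction and chain, build a rule from the classifier's match and
 * actions, and install it in hardware.
 */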
int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
                            struct tc_cls_flower_offload *f)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct net_device *dev = mlxsw_sp_port->dev;
        struct mlxsw_sp_acl_rule_info *rulei;
        struct mlxsw_sp_acl_ruleset *ruleset;
        struct mlxsw_sp_acl_rule *rule;
        int err;

        ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, ingress,
                                           f->common.chain_index,
                                           MLXSW_SP_ACL_PROFILE_FLOWER);
        if (IS_ERR(ruleset))
                return PTR_ERR(ruleset);

        rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                goto err_rule_create;
        }

        rulei = mlxsw_sp_acl_rule_rulei(rule);
        err = mlxsw_sp_flower_parse(mlxsw_sp, dev, ingress, rulei, f);
        if (err)
                goto err_flower_parse;

        err = mlxsw_sp_acl_rulei_commit(rulei);
        if (err)
                goto err_rulei_commit;

        err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
        if (err)
                goto err_rule_add;

        mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
        return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
        mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
        mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
        return err;
}

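/* Tear down the hardware rule that was installed for this classifier,
 * if one exists.
 */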
void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
                             struct tc_cls_flower_offload *f)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_acl_ruleset *ruleset;
        struct mlxsw_sp_acl_rule *rule;

        ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
                                           ingress, f->common.chain_index,
                                           MLXSW_SP_ACL_PROFILE_FLOWER);
        if (IS_ERR(ruleset))
                return;

        rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
        if (rule) {
                mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
                mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
        }

        mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

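/* Read the packet and byte counters and the last-use timestamp for the
 * rule matching this classifier and report them back to the TC layer.
 */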
int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
                          struct tc_cls_flower_offload *f)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_acl_ruleset *ruleset;
        struct mlxsw_sp_acl_rule *rule;
        u64 packets;
        u64 lastuse;
        u64 bytes;
        int err;

        ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
                                           ingress, f->common.chain_index,
                                           MLXSW_SP_ACL_PROFILE_FLOWER);
        if (WARN_ON(IS_ERR(ruleset)))
                return -EINVAL;

        rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
        if (!rule)
                return -EINVAL;

        err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
                                          &lastuse);
        if (err)
                goto err_rule_get_stats;

        tcf_exts_stats_update(f->exts, bytes, packets, lastuse);

        mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
        return 0;

err_rule_get_stats:
        mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
        return err;
}