/* Copyright (C) 2017 Netronome Systems, Inc. */
#include <linux/bitfield.h>
#include <net/pkt_cls.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"

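/* All action builders below encode the a_op word the same way: the
 * action length in 32-bit longwords goes in NFP_FL_ACT_LEN_LW and the
 * firmware opcode in NFP_FL_ACT_JMP_ID, combined with FIELD_PREP().
 */
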
static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
{
        size_t act_size = sizeof(struct nfp_fl_pop_vlan);
        u16 tmp_pop_vlan_op;

        tmp_pop_vlan_op =
                FIELD_PREP(NFP_FL_ACT_LEN_LW, act_size >> NFP_FL_LW_SIZ) |
                FIELD_PREP(NFP_FL_ACT_JMP_ID, NFP_FL_ACTION_OPCODE_POP_VLAN);

        pop_vlan->a_op = cpu_to_be16(tmp_pop_vlan_op);
        pop_vlan->reserved = 0;
}

static void
nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
                 const struct tc_action *action)
{
        size_t act_size = sizeof(struct nfp_fl_push_vlan);
        struct tcf_vlan *vlan = to_vlan(action);
        u16 tmp_push_vlan_tci;
        u16 tmp_push_vlan_op;

        tmp_push_vlan_op =
                FIELD_PREP(NFP_FL_ACT_LEN_LW, act_size >> NFP_FL_LW_SIZ) |
                FIELD_PREP(NFP_FL_ACT_JMP_ID, NFP_FL_ACTION_OPCODE_PUSH_VLAN);

        push_vlan->a_op = cpu_to_be16(tmp_push_vlan_op);

        push_vlan->reserved = 0;
        push_vlan->vlan_tpid = tcf_vlan_push_proto(action);

        tmp_push_vlan_tci =
                FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, vlan->tcfv_push_prio) |
                FIELD_PREP(NFP_FL_PUSH_VLAN_VID, vlan->tcfv_push_vid) |
                NFP_FL_PUSH_VLAN_CFI;
        push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}

static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
                                         enum nfp_flower_tun_type tun_type)
{
        if (!out_dev->rtnl_link_ops)
                return false;

        if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
                return tun_type == NFP_FL_TUNNEL_VXLAN;

        return false;
}

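/* Resolve the mirred target netdev and fill in the output action.
 * Tunnel egress must match the negotiated tunnel type and only one
 * tunnel output is allowed per flow; plain egress must be an nfp
 * representor on the same switch as the ingress port.
 */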
static int
nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
              struct nfp_fl_payload *nfp_flow, bool last,
              struct net_device *in_dev, enum nfp_flower_tun_type tun_type,
              int *tun_out_cnt)
{
        size_t act_size = sizeof(struct nfp_fl_output);
        u16 tmp_output_op, tmp_flags;
        struct net_device *out_dev;
        int ifindex;

        /* Set action opcode to output action. */
        tmp_output_op =
                FIELD_PREP(NFP_FL_ACT_LEN_LW, act_size >> NFP_FL_LW_SIZ) |
                FIELD_PREP(NFP_FL_ACT_JMP_ID, NFP_FL_ACTION_OPCODE_OUTPUT);

        output->a_op = cpu_to_be16(tmp_output_op);

        ifindex = tcf_mirred_ifindex(action);
        out_dev = __dev_get_by_index(dev_net(in_dev), ifindex);
        if (!out_dev)
                return -EOPNOTSUPP;

        tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;

        if (tun_type) {
                /* Verify the egress netdev matches the tunnel type. */
                if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type))
                        return -EOPNOTSUPP;

                /* Only one tunnel output per flow is supported. */
                if (*tun_out_cnt)
                        return -EOPNOTSUPP;
                (*tun_out_cnt)++;

                output->flags = cpu_to_be16(tmp_flags |
                                            NFP_FL_OUT_FLAGS_USE_TUN);
                output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
        } else {
                /* Set action output parameters. */
                output->flags = cpu_to_be16(tmp_flags);

                /* Only offload if the egress port is on the same device as
                 * the ingress port.
                 */
                if (!switchdev_port_same_parent_id(in_dev, out_dev))
                        return -EOPNOTSUPP;
                if (!nfp_netdev_is_nfp_repr(out_dev))
                        return -EOPNOTSUPP;

                output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
                if (!output->port)
                        return -EOPNOTSUPP;
        }
        nfp_flow->meta.shortcut = output->port;

        return 0;
}

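/* Encap offload is restricted to the IANA-assigned VXLAN destination
 * port (NFP_FL_VXLAN_PORT), compared against the tunnel key's tp_dst
 * in network byte order.
 */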
static bool nfp_fl_supported_tun_port(const struct tc_action *action)
{
        struct ip_tunnel_info *tun = tcf_tunnel_info(action);

        return tun->key.tp_dst == htons(NFP_FL_VXLAN_PORT);
}

static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
{
        size_t act_size = sizeof(struct nfp_fl_pre_tunnel);
        struct nfp_fl_pre_tunnel *pre_tun_act;
        u16 tmp_pre_tun_op;

        /* The pre_tunnel action must be first on the action list.
         * If other actions already exist they need to be pushed forward.
         */
        if (act_len)
                memmove(act_data + act_size, act_data, act_len);

        pre_tun_act = (struct nfp_fl_pre_tunnel *)act_data;

        memset(pre_tun_act, 0, act_size);

        tmp_pre_tun_op =
                FIELD_PREP(NFP_FL_ACT_LEN_LW, act_size >> NFP_FL_LW_SIZ) |
                FIELD_PREP(NFP_FL_ACT_JMP_ID, NFP_FL_ACTION_OPCODE_PRE_TUNNEL);

        pre_tun_act->a_op = cpu_to_be16(tmp_pre_tun_op);

        return pre_tun_act;
}

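/* Build the SET_IPV4_TUNNEL action from the tc tunnel_key parameters
 * and complete the paired pre-tunnel action with the tunnel's IPv4
 * destination, which the NFP uses for next-hop resolution.
 */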
static int
nfp_fl_set_vxlan(struct nfp_fl_set_vxlan *set_vxlan,
                 const struct tc_action *action,
                 struct nfp_fl_pre_tunnel *pre_tun)
{
        struct ip_tunnel_info *vxlan = tcf_tunnel_info(action);
        size_t act_size = sizeof(struct nfp_fl_set_vxlan);
        u32 tmp_set_vxlan_type_index = 0;
        u16 tmp_set_vxlan_op;
        /* Currently support one pre-tunnel so index is always 0. */
        int pretun_idx = 0;

        if (vxlan->options_len) {
                /* Do not support options e.g. vxlan gpe. */
                return -EOPNOTSUPP;
        }

        tmp_set_vxlan_op =
                FIELD_PREP(NFP_FL_ACT_LEN_LW, act_size >> NFP_FL_LW_SIZ) |
                FIELD_PREP(NFP_FL_ACT_JMP_ID,
                           NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL);

        set_vxlan->a_op = cpu_to_be16(tmp_set_vxlan_op);

        /* Set tunnel type and pre-tunnel index. */
        tmp_set_vxlan_type_index |=
                FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, NFP_FL_TUNNEL_VXLAN) |
                FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);

        set_vxlan->tun_type_index = cpu_to_be32(tmp_set_vxlan_type_index);

        set_vxlan->tun_id = vxlan->key.tun_id;
        set_vxlan->tun_flags = vxlan->key.tun_flags;
        set_vxlan->ipv4_ttl = vxlan->key.ttl;
        set_vxlan->ipv4_tos = vxlan->key.tos;

        /* Complete the pre_tunnel action. */
        pre_tun->ipv4_dst = vxlan->key.u.ipv4.dst;

        return 0;
}

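/* Merge a 32-bit pedit set into the accumulated value/mask pair: bits
 * covered by @mask take the new @value, previously set bits outside
 * @mask are preserved, and the stored mask grows monotonically.
 */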
static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
{
        u32 oldvalue = get_unaligned((u32 *)p_exact);
        u32 oldmask = get_unaligned((u32 *)p_mask);

        value &= mask;
        value |= oldvalue & ~mask;

        put_unaligned(oldmask | mask, (u32 *)p_mask);
        put_unaligned(value, (u32 *)p_exact);
}

static int
nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off,
               struct nfp_fl_set_eth *set_eth)
{
        u16 tmp_set_eth_op;
        u32 exact, mask;

        if (off + 4 > ETH_ALEN * 2)
                return -EOPNOTSUPP;

        mask = ~tcf_pedit_mask(action, idx);
        exact = tcf_pedit_val(action, idx);

        if (exact & ~mask)
                return -EOPNOTSUPP;

        nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off],
                            &set_eth->eth_addr_mask[off]);

        set_eth->reserved = cpu_to_be16(0);
        tmp_set_eth_op = FIELD_PREP(NFP_FL_ACT_LEN_LW,
                                    sizeof(*set_eth) >> NFP_FL_LW_SIZ) |
                         FIELD_PREP(NFP_FL_ACT_JMP_ID,
                                    NFP_FL_ACTION_OPCODE_SET_ETHERNET);
        set_eth->a_op = cpu_to_be16(tmp_set_eth_op);

        return 0;
}

static int
nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
               struct nfp_fl_set_ip4_addrs *set_ip_addr)
{
        u16 tmp_set_ipv4_op;
        __be32 exact, mask;

        /* We are expecting tcf_pedit to return a big endian value. */
        mask = (__force __be32)~tcf_pedit_mask(action, idx);
        exact = (__force __be32)tcf_pedit_val(action, idx);

        if (exact & ~mask)
                return -EOPNOTSUPP;

        switch (off) {
        case offsetof(struct iphdr, daddr):
                set_ip_addr->ipv4_dst_mask = mask;
                set_ip_addr->ipv4_dst = exact;
                break;
        case offsetof(struct iphdr, saddr):
                set_ip_addr->ipv4_src_mask = mask;
                set_ip_addr->ipv4_src = exact;
                break;
        default:
                return -EOPNOTSUPP;
        }

        set_ip_addr->reserved = cpu_to_be16(0);
        tmp_set_ipv4_op = FIELD_PREP(NFP_FL_ACT_LEN_LW,
                                     sizeof(*set_ip_addr) >> NFP_FL_LW_SIZ) |
                          FIELD_PREP(NFP_FL_ACT_JMP_ID,
                                     NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS);
        set_ip_addr->a_op = cpu_to_be16(tmp_set_ipv4_op);

        return 0;
}

static void
nfp_fl_set_ip6_helper(int opcode_tag, int idx, __be32 exact, __be32 mask,
                      struct nfp_fl_set_ipv6_addr *ip6)
{
        u16 tmp_set_op;

        ip6->ipv6[idx % 4].mask = mask;
        ip6->ipv6[idx % 4].exact = exact;

        ip6->reserved = cpu_to_be16(0);
        tmp_set_op = FIELD_PREP(NFP_FL_ACT_LEN_LW, sizeof(*ip6) >>
                                NFP_FL_LW_SIZ) |
                     FIELD_PREP(NFP_FL_ACT_JMP_ID, opcode_tag);
        ip6->a_op = cpu_to_be16(tmp_set_op);
}

static int
nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
               struct nfp_fl_set_ipv6_addr *ip_dst,
               struct nfp_fl_set_ipv6_addr *ip_src)
{
        __be32 exact, mask;

        /* We are expecting tcf_pedit to return a big endian value. */
        mask = (__force __be32)~tcf_pedit_mask(action, idx);
        exact = (__force __be32)tcf_pedit_val(action, idx);

        if (exact & ~mask)
                return -EOPNOTSUPP;

        if (off < offsetof(struct ipv6hdr, saddr))
                return -EOPNOTSUPP;
        else if (off < offsetof(struct ipv6hdr, daddr))
                nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, idx,
                                      exact, mask, ip_src);
        else if (off < offsetof(struct ipv6hdr, daddr) +
                       sizeof(struct in6_addr))
                nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, idx,
                                      exact, mask, ip_dst);
        else
                return -EOPNOTSUPP;

        return 0;
}

static int
nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off,
                 struct nfp_fl_set_tport *set_tport, int opcode)
{
        u32 exact, mask;
        u16 tmp_set_op;

        if (off)
                return -EOPNOTSUPP;

        mask = ~tcf_pedit_mask(action, idx);
        exact = tcf_pedit_val(action, idx);

        if (exact & ~mask)
                return -EOPNOTSUPP;

        nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val,
                            set_tport->tp_port_mask);

        set_tport->reserved = cpu_to_be16(0);
        tmp_set_op = FIELD_PREP(NFP_FL_ACT_LEN_LW,
                                sizeof(*set_tport) >> NFP_FL_LW_SIZ);
        tmp_set_op |= FIELD_PREP(NFP_FL_ACT_JMP_ID, opcode);
        set_tport->a_op = cpu_to_be16(tmp_set_op);

        return 0;
}

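/* Walk the extended pedit keys, accumulating set actions per header
 * type; only TCA_PEDIT_KEY_EX_CMD_SET is offloadable. A single
 * resulting set action (two for combined IPv6 src+dst) is then
 * appended to @nfp_action and @a_len advanced past it.
 */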
static int
nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
{
        struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
        struct nfp_fl_set_ip4_addrs set_ip_addr;
        struct nfp_fl_set_tport set_tport;
        struct nfp_fl_set_eth set_eth;
        enum pedit_header_type htype;
        int idx, nkeys, err;
        size_t act_size;
        u32 offset, cmd;

        memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
        memset(&set_ip6_src, 0, sizeof(set_ip6_src));
        memset(&set_ip_addr, 0, sizeof(set_ip_addr));
        memset(&set_tport, 0, sizeof(set_tport));
        memset(&set_eth, 0, sizeof(set_eth));
        nkeys = tcf_pedit_nkeys(action);

        for (idx = 0; idx < nkeys; idx++) {
                cmd = tcf_pedit_cmd(action, idx);
                htype = tcf_pedit_htype(action, idx);
                offset = tcf_pedit_offset(action, idx);

                if (cmd != TCA_PEDIT_KEY_EX_CMD_SET)
                        return -EOPNOTSUPP;

                switch (htype) {
                case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
                        err = nfp_fl_set_eth(action, idx, offset, &set_eth);
                        break;
                case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
                        err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr);
                        break;
                case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
                        err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst,
                                             &set_ip6_src);
                        break;
                case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
                        err = nfp_fl_set_tport(action, idx, offset, &set_tport,
                                               NFP_FL_ACTION_OPCODE_SET_TCP);
                        break;
                case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
                        err = nfp_fl_set_tport(action, idx, offset, &set_tport,
                                               NFP_FL_ACTION_OPCODE_SET_UDP);
                        break;
                default:
                        return -EOPNOTSUPP;
                }
                if (err)
                        return err;
        }

        if (set_eth.a_op) {
                act_size = sizeof(set_eth);
                memcpy(nfp_action, &set_eth, act_size);
                *a_len += act_size;
        } else if (set_ip_addr.a_op) {
                act_size = sizeof(set_ip_addr);
                memcpy(nfp_action, &set_ip_addr, act_size);
                *a_len += act_size;
        } else if (set_ip6_dst.a_op && set_ip6_src.a_op) {
                /* TC compiles set src and dst IPv6 address as a single action,
                 * the hardware requires this to be 2 separate actions.
                 */
                act_size = sizeof(set_ip6_src);
                memcpy(nfp_action, &set_ip6_src, act_size);
                *a_len += act_size;

                act_size = sizeof(set_ip6_dst);
                memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst,
                       act_size);
                *a_len += act_size;
        } else if (set_ip6_dst.a_op) {
                act_size = sizeof(set_ip6_dst);
                memcpy(nfp_action, &set_ip6_dst, act_size);
                *a_len += act_size;
        } else if (set_ip6_src.a_op) {
                act_size = sizeof(set_ip6_src);
                memcpy(nfp_action, &set_ip6_src, act_size);
                *a_len += act_size;
        } else if (set_tport.a_op) {
                act_size = sizeof(set_tport);
                memcpy(nfp_action, &set_tport, act_size);
                *a_len += act_size;
        }

        return 0;
}

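/* Translate a single TC action into its NFP equivalent and append it
 * at offset *a_len in nfp_fl->action_data, advancing *a_len past the
 * emitted action(s). Fixed-size actions are bounds-checked against
 * NFP_FL_MAX_A_SIZ before being written.
 */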
static int
nfp_flower_loop_action(const struct tc_action *a,
                       struct nfp_fl_payload *nfp_fl, int *a_len,
                       struct net_device *netdev,
                       enum nfp_flower_tun_type *tun_type, int *tun_out_cnt)
{
        struct nfp_fl_pre_tunnel *pre_tun;
        struct nfp_fl_set_vxlan *s_vxl;
        struct nfp_fl_push_vlan *psh_v;
        struct nfp_fl_pop_vlan *pop_v;
        struct nfp_fl_output *output;
        int err;

        if (is_tcf_gact_shot(a)) {
                nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
        } else if (is_tcf_mirred_egress_redirect(a)) {
                if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
                        return -EOPNOTSUPP;

                output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
                err = nfp_fl_output(output, a, nfp_fl, true, netdev, *tun_type,
                                    tun_out_cnt);
                if (err)
                        return err;

                *a_len += sizeof(struct nfp_fl_output);
        } else if (is_tcf_mirred_egress_mirror(a)) {
                if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
                        return -EOPNOTSUPP;

                output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
                err = nfp_fl_output(output, a, nfp_fl, false, netdev, *tun_type,
                                    tun_out_cnt);
                if (err)
                        return err;

                *a_len += sizeof(struct nfp_fl_output);
        } else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
                if (*a_len + sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ)
                        return -EOPNOTSUPP;

                pop_v = (struct nfp_fl_pop_vlan *)&nfp_fl->action_data[*a_len];
                nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_POPV);

                nfp_fl_pop_vlan(pop_v);
                *a_len += sizeof(struct nfp_fl_pop_vlan);
        } else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
                if (*a_len + sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ)
                        return -EOPNOTSUPP;

                psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len];
                nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

                nfp_fl_push_vlan(psh_v, a);
                *a_len += sizeof(struct nfp_fl_push_vlan);
        } else if (is_tcf_tunnel_set(a) && nfp_fl_supported_tun_port(a)) {
                /* The pre-tunnel action is required for tunnel encap.
                 * It checks for next hop entries on the NFP; if there are
                 * none, the packet falls back to the host before the other
                 * actions are applied.
                 */
                if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
                    sizeof(struct nfp_fl_set_vxlan) > NFP_FL_MAX_A_SIZ)
                        return -EOPNOTSUPP;

                *tun_type = NFP_FL_TUNNEL_VXLAN;
                pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
                nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
                *a_len += sizeof(struct nfp_fl_pre_tunnel);

                s_vxl = (struct nfp_fl_set_vxlan *)&nfp_fl->action_data[*a_len];
                err = nfp_fl_set_vxlan(s_vxl, a, pre_tun);
                if (err)
                        return err;

                *a_len += sizeof(struct nfp_fl_set_vxlan);
        } else if (is_tcf_tunnel_release(a)) {
                /* Tunnel decap is handled by default so accept action. */
                return 0;
        } else if (is_tcf_pedit(a)) {
                if (nfp_fl_pedit(a, &nfp_fl->action_data[*a_len], a_len))
                        return -EOPNOTSUPP;
        } else {
                /* Currently we do not handle any other actions. */
                return -EOPNOTSUPP;
        }

        return 0;
}

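/**
 * nfp_flower_compile_action() - Compile a flow's TC action list.
 * @flow:     TC flower classifier offload structure.
 * @netdev:   netdev the flow was requested on (ingress port).
 * @nfp_flow: flow payload whose action_data and metadata are filled in.
 *
 * Return: 0 on success, or -EOPNOTSUPP if any action cannot be offloaded.
 */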
int nfp_flower_compile_action(struct tc_cls_flower_offload *flow,
                              struct net_device *netdev,
                              struct nfp_fl_payload *nfp_flow)
{
        int act_len, act_cnt, err, tun_out_cnt;
        enum nfp_flower_tun_type tun_type;
        const struct tc_action *a;
        LIST_HEAD(actions);

        memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
        nfp_flow->meta.act_len = 0;
        tun_type = NFP_FL_TUNNEL_NONE;
        act_len = 0;
        act_cnt = 0;
        tun_out_cnt = 0;

        tcf_exts_to_list(flow->exts, &actions);
        list_for_each_entry(a, &actions, list) {
                err = nfp_flower_loop_action(a, nfp_flow, &act_len, netdev,
                                             &tun_type, &tun_out_cnt);
                if (err)
                        return err;
                act_cnt++;
        }

        /* The shortcut optimisation is only valid for flows with a
         * single action, so fall back to ACT_NULL otherwise.
         */
        if (act_cnt > 1)
                nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

        nfp_flow->meta.act_len = act_len;

        return 0;
}