#include <linux/bitfield.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"

void
nfp_flower_compile_meta(struct nfp_flower_meta_tci *ext,
			struct nfp_flower_meta_tci *msk, u8 key_type)
{
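	/* Populate the metadata frame. */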
	ext->nfp_flow_key_layer = key_type;
	ext->mask_id = ~0;

	msk->nfp_flow_key_layer = key_type;
	msk->mask_id = ~0;
}

void
nfp_flower_compile_tci(struct nfp_flower_meta_tci *ext,
		       struct nfp_flower_meta_tci *msk,
		       struct flow_rule *rule)
{
	u16 msk_tci, key_tci;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
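		/* Populate the tci field. */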
		key_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
		key_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
				      match.key->vlan_priority) |
			   FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
				      match.key->vlan_id);

		msk_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
		msk_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
				      match.mask->vlan_priority) |
			   FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
				      match.mask->vlan_id);

		ext->tci |= cpu_to_be16((key_tci & msk_tci));
		msk->tci |= cpu_to_be16(msk_tci);
	}
}

static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
			    struct nfp_flower_meta_tci *msk,
			    struct flow_rule *rule, u8 key_type, bool qinq_sup)
{
	memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
	memset(msk, 0, sizeof(struct nfp_flower_meta_tci));

	nfp_flower_compile_meta(ext, msk, key_type);

	if (!qinq_sup)
		nfp_flower_compile_tci(ext, msk, rule);
}

void
nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
{
	frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext);
}

int
nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
			bool mask_version, enum nfp_flower_tun_type tun_type,
			struct netlink_ext_ack *extack)
{
	if (mask_version) {
		frame->in_port = cpu_to_be32(~0);
		return 0;
	}

	if (tun_type) {
		frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
	} else {
		if (!cmsg_port) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid ingress interface for match offload");
			return -EOPNOTSUPP;
		}
		frame->in_port = cpu_to_be32(cmsg_port);
	}

	return 0;
}

void
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
		       struct nfp_flower_mac_mpls *msk,
		       struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;
		int i;

		flow_rule_match_eth_addrs(rule, &match);
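		/* Populate mac frame. */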
		for (i = 0; i < ETH_ALEN; i++) {
			ext->mac_dst[i] |= match.key->dst[i] &
					   match.mask->dst[i];
			msk->mac_dst[i] |= match.mask->dst[i];
			ext->mac_src[i] |= match.key->src[i] &
					   match.mask->src[i];
			msk->mac_src[i] |= match.mask->src[i];
		}
	}
}

int
nfp_flower_compile_mpls(struct nfp_flower_mac_mpls *ext,
			struct nfp_flower_mac_mpls *msk,
			struct flow_rule *rule,
			struct netlink_ext_ack *extack)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
		struct flow_match_mpls match;
		u32 key_mpls, msk_mpls;

		flow_rule_match_mpls(rule, &match);

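		/* Only support matching the first LSE */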
		if (match.mask->used_lses != 1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported offload: invalid LSE depth for MPLS match offload");
			return -EOPNOTSUPP;
		}

		key_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
				      match.key->ls[0].mpls_label) |
			   FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
				      match.key->ls[0].mpls_tc) |
			   FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
				      match.key->ls[0].mpls_bos) |
			   NFP_FLOWER_MASK_MPLS_Q;

		msk_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
				      match.mask->ls[0].mpls_label) |
			   FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
				      match.mask->ls[0].mpls_tc) |
			   FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
				      match.mask->ls[0].mpls_bos) |
			   NFP_FLOWER_MASK_MPLS_Q;

		ext->mpls_lse |= cpu_to_be32((key_mpls & msk_mpls));
		msk->mpls_lse |= cpu_to_be32(msk_mpls);
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
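		/* Check for mpls ether type and ensure presence of mpls label. */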
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		if (match.key->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
		    match.key->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) {
			ext->mpls_lse |= cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
			msk->mpls_lse |= cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
		}
	}

	return 0;
}

static int
nfp_flower_compile_mac_mpls(struct nfp_flower_mac_mpls *ext,
			    struct nfp_flower_mac_mpls *msk,
			    struct flow_rule *rule,
			    struct netlink_ext_ack *extack)
{
	memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
	memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));

	nfp_flower_compile_mac(ext, msk, rule);

	return nfp_flower_compile_mpls(ext, msk, rule, extack);
}

void
nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
			 struct nfp_flower_tp_ports *msk,
			 struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		ext->port_src |= match.key->src & match.mask->src;
		ext->port_dst |= match.key->dst & match.mask->dst;
		msk->port_src |= match.mask->src;
		msk->port_dst |= match.mask->dst;
	}
}

static void
nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
			  struct nfp_flower_ip_ext *msk, struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ext->proto |= match.key->ip_proto & match.mask->ip_proto;
		msk->proto |= match.mask->ip_proto;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		ext->tos |= match.key->tos & match.mask->tos;
		ext->ttl |= match.key->ttl & match.mask->ttl;
		msk->tos |= match.mask->tos;
		msk->ttl |= match.mask->ttl;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		u16 tcp_flags, tcp_flags_mask;
		struct flow_match_tcp match;

		flow_rule_match_tcp(rule, &match);
		tcp_flags = be16_to_cpu(match.key->flags);
		tcp_flags_mask = be16_to_cpu(match.mask->flags);

		if (tcp_flags & TCPHDR_FIN)
			ext->flags |= NFP_FL_TCP_FLAG_FIN;
		if (tcp_flags_mask & TCPHDR_FIN)
			msk->flags |= NFP_FL_TCP_FLAG_FIN;

		if (tcp_flags & TCPHDR_SYN)
			ext->flags |= NFP_FL_TCP_FLAG_SYN;
		if (tcp_flags_mask & TCPHDR_SYN)
			msk->flags |= NFP_FL_TCP_FLAG_SYN;

		if (tcp_flags & TCPHDR_RST)
			ext->flags |= NFP_FL_TCP_FLAG_RST;
		if (tcp_flags_mask & TCPHDR_RST)
			msk->flags |= NFP_FL_TCP_FLAG_RST;

		if (tcp_flags & TCPHDR_PSH)
			ext->flags |= NFP_FL_TCP_FLAG_PSH;
		if (tcp_flags_mask & TCPHDR_PSH)
			msk->flags |= NFP_FL_TCP_FLAG_PSH;

		if (tcp_flags & TCPHDR_URG)
			ext->flags |= NFP_FL_TCP_FLAG_URG;
		if (tcp_flags_mask & TCPHDR_URG)
			msk->flags |= NFP_FL_TCP_FLAG_URG;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		if (match.key->flags & FLOW_DIS_IS_FRAGMENT)
			ext->flags |= NFP_FL_IP_FRAGMENTED;
		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT)
			msk->flags |= NFP_FL_IP_FRAGMENTED;
		if (match.key->flags & FLOW_DIS_FIRST_FRAG)
			ext->flags |= NFP_FL_IP_FRAG_FIRST;
		if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
			msk->flags |= NFP_FL_IP_FRAG_FIRST;
	}
}

static void
nfp_flower_fill_vlan(struct flow_match_vlan *match,
		     struct nfp_flower_vlan *ext,
		     struct nfp_flower_vlan *msk, bool outer_vlan)
{
	struct flow_dissector_key_vlan *mask = match->mask;
	struct flow_dissector_key_vlan *key = match->key;
	u16 msk_tci, key_tci;

	key_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
	key_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
			      key->vlan_priority) |
		   FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
			      key->vlan_id);
	msk_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
	msk_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
			      mask->vlan_priority) |
		   FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
			      mask->vlan_id);

	if (outer_vlan) {
		ext->outer_tci |= cpu_to_be16((key_tci & msk_tci));
		ext->outer_tpid |= key->vlan_tpid & mask->vlan_tpid;
		msk->outer_tci |= cpu_to_be16(msk_tci);
		msk->outer_tpid |= mask->vlan_tpid;
	} else {
		ext->inner_tci |= cpu_to_be16((key_tci & msk_tci));
		ext->inner_tpid |= key->vlan_tpid & mask->vlan_tpid;
		msk->inner_tci |= cpu_to_be16(msk_tci);
		msk->inner_tpid |= mask->vlan_tpid;
	}
}

void
nfp_flower_compile_vlan(struct nfp_flower_vlan *ext,
			struct nfp_flower_vlan *msk,
			struct flow_rule *rule)
{
	struct flow_match_vlan match;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		flow_rule_match_vlan(rule, &match);
		nfp_flower_fill_vlan(&match, ext, msk, true);
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		flow_rule_match_cvlan(rule, &match);
		nfp_flower_fill_vlan(&match, ext, msk, false);
	}
}

void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
			struct nfp_flower_ipv4 *msk, struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		ext->ipv4_src |= match.key->src & match.mask->src;
		ext->ipv4_dst |= match.key->dst & match.mask->dst;
		msk->ipv4_src |= match.mask->src;
		msk->ipv4_dst |= match.mask->dst;
	}

	nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
}

void
nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
			struct nfp_flower_ipv6 *msk, struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;
		int i;

		flow_rule_match_ipv6_addrs(rule, &match);
		for (i = 0; i < sizeof(ext->ipv6_src); i++) {
			ext->ipv6_src.s6_addr[i] |= match.key->src.s6_addr[i] &
						    match.mask->src.s6_addr[i];
			ext->ipv6_dst.s6_addr[i] |= match.key->dst.s6_addr[i] &
						    match.mask->dst.s6_addr[i];
			msk->ipv6_src.s6_addr[i] |= match.mask->src.s6_addr[i];
			msk->ipv6_dst.s6_addr[i] |= match.mask->dst.s6_addr[i];
		}
	}

	nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
}

void
nfp_flower_compile_geneve_opt(u8 *ext, u8 *msk, struct flow_rule *rule)
{
	struct flow_match_enc_opts match;
	int i;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
		flow_rule_match_enc_opts(rule, &match);

		for (i = 0; i < match.mask->len; i++) {
			ext[i] |= match.key->data[i] & match.mask->data[i];
			msk[i] |= match.mask->data[i];
		}
	}
}

static void
nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext,
				  struct nfp_flower_tun_ipv4 *msk,
				  struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_enc_ipv4_addrs(rule, &match);
		ext->src |= match.key->src & match.mask->src;
		ext->dst |= match.key->dst & match.mask->dst;
		msk->src |= match.mask->src;
		msk->dst |= match.mask->dst;
	}
}

static void
nfp_flower_compile_tun_ipv6_addrs(struct nfp_flower_tun_ipv6 *ext,
				  struct nfp_flower_tun_ipv6 *msk,
				  struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;
		int i;

		flow_rule_match_enc_ipv6_addrs(rule, &match);
		for (i = 0; i < sizeof(ext->src); i++) {
			ext->src.s6_addr[i] |= match.key->src.s6_addr[i] &
					       match.mask->src.s6_addr[i];
			ext->dst.s6_addr[i] |= match.key->dst.s6_addr[i] &
					       match.mask->dst.s6_addr[i];
			msk->src.s6_addr[i] |= match.mask->src.s6_addr[i];
			msk->dst.s6_addr[i] |= match.mask->dst.s6_addr[i];
		}
	}
}

static void
nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext,
			      struct nfp_flower_tun_ip_ext *msk,
			      struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_match_ip match;

		flow_rule_match_enc_ip(rule, &match);
		ext->tos |= match.key->tos & match.mask->tos;
		ext->ttl |= match.key->ttl & match.mask->ttl;
		msk->tos |= match.mask->tos;
		msk->ttl |= match.mask->ttl;
	}
}

static void
nfp_flower_compile_tun_udp_key(__be32 *key, __be32 *key_msk,
			       struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match;
		u32 vni;

		flow_rule_match_enc_keyid(rule, &match);
		vni = be32_to_cpu((match.key->keyid & match.mask->keyid)) <<
		      NFP_FL_TUN_VNI_OFFSET;
		*key |= cpu_to_be32(vni);
		vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET;
		*key_msk |= cpu_to_be32(vni);
	}
}

static void
nfp_flower_compile_tun_gre_key(__be32 *key, __be32 *key_msk, __be16 *flags,
			       __be16 *flags_msk, struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match;

		flow_rule_match_enc_keyid(rule, &match);
		*key |= match.key->keyid & match.mask->keyid;
		*key_msk |= match.mask->keyid;

		*flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
		*flags_msk = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
	}
}

void
nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
				struct nfp_flower_ipv4_gre_tun *msk,
				struct flow_rule *rule)
{
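	/* NVGRE is the only supported GRE tunnel type */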
	ext->ethertype = cpu_to_be16(ETH_P_TEB);
	msk->ethertype = cpu_to_be16(~0);

	nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
	nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
	nfp_flower_compile_tun_gre_key(&ext->tun_key, &msk->tun_key,
				       &ext->tun_flags, &msk->tun_flags, rule);
}

void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
				struct nfp_flower_ipv4_udp_tun *msk,
				struct flow_rule *rule)
{
	nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
	nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
	nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
}

void
nfp_flower_compile_ipv6_udp_tun(struct nfp_flower_ipv6_udp_tun *ext,
				struct nfp_flower_ipv6_udp_tun *msk,
				struct flow_rule *rule)
{
	nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
	nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
	nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
}

void
nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext,
				struct nfp_flower_ipv6_gre_tun *msk,
				struct flow_rule *rule)
{
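	/* NVGRE is the only supported GRE tunnel type */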
	ext->ethertype = cpu_to_be16(ETH_P_TEB);
	msk->ethertype = cpu_to_be16(~0);

	nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
	nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
	nfp_flower_compile_tun_gre_key(&ext->tun_key, &msk->tun_key,
				       &ext->tun_flags, &msk->tun_flags, rule);
}

int nfp_flower_compile_flow_match(struct nfp_app *app,
				  struct flow_rule *rule,
				  struct nfp_fl_key_ls *key_ls,
				  struct net_device *netdev,
				  struct nfp_fl_payload *nfp_flow,
				  enum nfp_flower_tun_type tun_type,
				  struct netlink_ext_ack *extack)
{
	struct nfp_flower_priv *priv = app->priv;
	bool qinq_sup;
	u32 port_id;
	int ext_len;
	int err;
	u8 *ext;
	u8 *msk;

	port_id = nfp_flower_get_port_id_from_netdev(app, netdev);

	memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
	memset(nfp_flow->mask_data, 0, key_ls->key_size);

	ext = nfp_flow->unmasked_data;
	msk = nfp_flow->mask_data;

	qinq_sup = !!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ);

	nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
				    (struct nfp_flower_meta_tci *)msk,
				    rule, key_ls->key_layer, qinq_sup);
	ext += sizeof(struct nfp_flower_meta_tci);
	msk += sizeof(struct nfp_flower_meta_tci);

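	/* Populate Extended Metadata if Required. */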
	if (NFP_FLOWER_LAYER_EXT_META & key_ls->key_layer) {
		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)ext,
					    key_ls->key_layer_two);
		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
					    key_ls->key_layer_two);
		ext += sizeof(struct nfp_flower_ext_meta);
		msk += sizeof(struct nfp_flower_ext_meta);
	}

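	/* Populate Exact Port data. */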
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
				      port_id, false, tun_type, extack);
	if (err)
		return err;

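	/* Populate Mask Port Data. */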
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
				      port_id, true, tun_type, extack);
	if (err)
		return err;

	ext += sizeof(struct nfp_flower_in_port);
	msk += sizeof(struct nfp_flower_in_port);

	if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
		err = nfp_flower_compile_mac_mpls((struct nfp_flower_mac_mpls *)ext,
						  (struct nfp_flower_mac_mpls *)msk,
						  rule, extack);
		if (err)
			return err;

		ext += sizeof(struct nfp_flower_mac_mpls);
		msk += sizeof(struct nfp_flower_mac_mpls);
	}

	if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
		nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
					 (struct nfp_flower_tp_ports *)msk,
					 rule);
		ext += sizeof(struct nfp_flower_tp_ports);
		msk += sizeof(struct nfp_flower_tp_ports);
	}

	if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
		nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
					(struct nfp_flower_ipv4 *)msk,
					rule);
		ext += sizeof(struct nfp_flower_ipv4);
		msk += sizeof(struct nfp_flower_ipv4);
	}

	if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
		nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
					(struct nfp_flower_ipv6 *)msk,
					rule);
		ext += sizeof(struct nfp_flower_ipv6);
		msk += sizeof(struct nfp_flower_ipv6);
	}

	if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) {
		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
			struct nfp_flower_ipv6_gre_tun *gre_match;
			struct nfp_ipv6_addr_entry *entry;
			struct in6_addr *dst;

			nfp_flower_compile_ipv6_gre_tun((void *)ext,
							(void *)msk, rule);
			gre_match = (struct nfp_flower_ipv6_gre_tun *)ext;
			dst = &gre_match->ipv6.dst;
			ext += sizeof(struct nfp_flower_ipv6_gre_tun);
			msk += sizeof(struct nfp_flower_ipv6_gre_tun);

			entry = nfp_tunnel_add_ipv6_off(app, dst);
			if (!entry)
				return -EOPNOTSUPP;

			nfp_flow->nfp_tun_ipv6 = entry;
		} else {
			__be32 dst;

			nfp_flower_compile_ipv4_gre_tun((void *)ext,
							(void *)msk, rule);
			dst = ((struct nfp_flower_ipv4_gre_tun *)ext)->ipv4.dst;
			ext += sizeof(struct nfp_flower_ipv4_gre_tun);
			msk += sizeof(struct nfp_flower_ipv4_gre_tun);

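			/* Store the tunnel destination in the rule data.
			 * This must be present and be an exact match.
			 */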
			nfp_flow->nfp_tun_ipv4_addr = dst;
			nfp_tunnel_add_ipv4_off(app, dst);
		}
	}

	if (NFP_FLOWER_LAYER2_QINQ & key_ls->key_layer_two) {
		nfp_flower_compile_vlan((struct nfp_flower_vlan *)ext,
					(struct nfp_flower_vlan *)msk,
					rule);
		ext += sizeof(struct nfp_flower_vlan);
		msk += sizeof(struct nfp_flower_vlan);
	}

	if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
	    key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
			struct nfp_flower_ipv6_udp_tun *udp_match;
			struct nfp_ipv6_addr_entry *entry;
			struct in6_addr *dst;

			nfp_flower_compile_ipv6_udp_tun((void *)ext,
							(void *)msk, rule);
			udp_match = (struct nfp_flower_ipv6_udp_tun *)ext;
			dst = &udp_match->ipv6.dst;
			ext += sizeof(struct nfp_flower_ipv6_udp_tun);
			msk += sizeof(struct nfp_flower_ipv6_udp_tun);

			entry = nfp_tunnel_add_ipv6_off(app, dst);
			if (!entry)
				return -EOPNOTSUPP;

			nfp_flow->nfp_tun_ipv6 = entry;
		} else {
			__be32 dst;

			nfp_flower_compile_ipv4_udp_tun((void *)ext,
							(void *)msk, rule);
			dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ipv4.dst;
			ext += sizeof(struct nfp_flower_ipv4_udp_tun);
			msk += sizeof(struct nfp_flower_ipv4_udp_tun);

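			/* Store the tunnel destination in the rule data.
			 * This must be present and be an exact match.
			 */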
			nfp_flow->nfp_tun_ipv4_addr = dst;
			nfp_tunnel_add_ipv4_off(app, dst);
		}

		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP)
			nfp_flower_compile_geneve_opt(ext, msk, rule);
	}

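	/* Check that the flow key does not exceed the maximum limit.
	 * All structures in the key are multiples of 4 bytes, so use u32.
	 */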
	ext_len = (u32 *)ext - (u32 *)nfp_flow->unmasked_data;
	if (ext_len > NFP_FLOWER_KEY_MAX_LW) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported offload: flow key too long");
		return -EOPNOTSUPP;
	}

	return 0;
}