// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"

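/* Build the meta/TCI section of the match key. The key layer bitmap and
 * mask id are always populated; the 802.1Q TCI is added when the rule
 * matches on VLAN. "ext" holds exact-match data, "msk" the mask data.
 */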
static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
                            struct nfp_flower_meta_tci *msk,
                            struct flow_rule *rule, u8 key_type)
{
        u16 tmp_tci;

        memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
        memset(msk, 0, sizeof(struct nfp_flower_meta_tci));

        /* Populate the metadata frame. */
        ext->nfp_flow_key_layer = key_type;
        ext->mask_id = ~0;

        msk->nfp_flow_key_layer = key_type;
        msk->mask_id = ~0;

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_match_vlan match;

                flow_rule_match_vlan(rule, &match);

                tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
                tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
                                      match.key->vlan_priority) |
                           FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
                                      match.key->vlan_id);
                ext->tci = cpu_to_be16(tmp_tci);

                tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
                tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
                                      match.mask->vlan_priority) |
                           FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
                                      match.mask->vlan_id);
                msk->tci = cpu_to_be16(tmp_tci);
        }
}

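/* Write the second key layer bitmap, used when the rule needs match
 * sections beyond those the base bitmap can describe.
 */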
static void
nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
{
        frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext);
}

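/* Encode the ingress port. The mask half is always an exact match; for
 * tunnel rules the tunnel type is encoded in place of a port id.
 */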
static int
nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
                        bool mask_version, enum nfp_flower_tun_type tun_type,
                        struct netlink_ext_ack *extack)
{
        if (mask_version) {
                frame->in_port = cpu_to_be32(~0);
                return 0;
        }

        if (tun_type) {
                frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
        } else {
                if (!cmsg_port) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid ingress interface for match offload");
                        return -EOPNOTSUPP;
                }
                frame->in_port = cpu_to_be32(cmsg_port);
        }

        return 0;
}

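/* Build the MAC/MPLS section: Ethernet source/destination addresses and,
 * when matched, the topmost MPLS label stack entry.
 */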
static void
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
                       struct nfp_flower_mac_mpls *msk, struct flow_rule *rule)
{
        memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
        memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
                struct flow_match_eth_addrs match;

                flow_rule_match_eth_addrs(rule, &match);

                ether_addr_copy(ext->mac_dst, &match.key->dst[0]);
                ether_addr_copy(ext->mac_src, &match.key->src[0]);
                ether_addr_copy(msk->mac_dst, &match.mask->dst[0]);
                ether_addr_copy(msk->mac_src, &match.mask->src[0]);
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
                struct flow_match_mpls match;
                u32 t_mpls;

                flow_rule_match_mpls(rule, &match);
                t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.key->mpls_label) |
                         FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.key->mpls_tc) |
                         FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.key->mpls_bos) |
                         NFP_FLOWER_MASK_MPLS_Q;
                ext->mpls_lse = cpu_to_be32(t_mpls);
                t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.mask->mpls_label) |
                         FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.mask->mpls_tc) |
                         FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.mask->mpls_bos) |
                         NFP_FLOWER_MASK_MPLS_Q;
                msk->mpls_lse = cpu_to_be32(t_mpls);
        } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
                /* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q
                 * bit, which indicates an mpls ether type but without any
                 * mpls fields.
                 */
                struct flow_match_basic match;

                flow_rule_match_basic(rule, &match);
                if (match.key->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
                    match.key->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) {
                        ext->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
                        msk->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
                }
        }
}

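/* Build the transport section: L4 source and destination ports. */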
static void
nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
                         struct nfp_flower_tp_ports *msk,
                         struct flow_rule *rule)
{
        memset(ext, 0, sizeof(struct nfp_flower_tp_ports));
        memset(msk, 0, sizeof(struct nfp_flower_tp_ports));

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
                struct flow_match_ports match;

                flow_rule_match_ports(rule, &match);
                ext->port_src = match.key->src;
                ext->port_dst = match.key->dst;
                msk->port_src = match.mask->src;
                msk->port_dst = match.mask->dst;
        }
}

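/* Build the IP extension fields shared by the IPv4 and IPv6 sections:
 * protocol, TOS/TTL, TCP flags and IP fragmentation flags.
 */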
static void
nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
                          struct nfp_flower_ip_ext *msk, struct flow_rule *rule)
{
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_match_basic match;

                flow_rule_match_basic(rule, &match);
                ext->proto = match.key->ip_proto;
                msk->proto = match.mask->ip_proto;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
                struct flow_match_ip match;

                flow_rule_match_ip(rule, &match);
                ext->tos = match.key->tos;
                ext->ttl = match.key->ttl;
                msk->tos = match.mask->tos;
                msk->ttl = match.mask->ttl;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
                u16 tcp_flags, tcp_flags_mask;
                struct flow_match_tcp match;

                flow_rule_match_tcp(rule, &match);
                tcp_flags = be16_to_cpu(match.key->flags);
                tcp_flags_mask = be16_to_cpu(match.mask->flags);

                if (tcp_flags & TCPHDR_FIN)
                        ext->flags |= NFP_FL_TCP_FLAG_FIN;
                if (tcp_flags_mask & TCPHDR_FIN)
                        msk->flags |= NFP_FL_TCP_FLAG_FIN;

                if (tcp_flags & TCPHDR_SYN)
                        ext->flags |= NFP_FL_TCP_FLAG_SYN;
                if (tcp_flags_mask & TCPHDR_SYN)
                        msk->flags |= NFP_FL_TCP_FLAG_SYN;

                if (tcp_flags & TCPHDR_RST)
                        ext->flags |= NFP_FL_TCP_FLAG_RST;
                if (tcp_flags_mask & TCPHDR_RST)
                        msk->flags |= NFP_FL_TCP_FLAG_RST;

                if (tcp_flags & TCPHDR_PSH)
                        ext->flags |= NFP_FL_TCP_FLAG_PSH;
                if (tcp_flags_mask & TCPHDR_PSH)
                        msk->flags |= NFP_FL_TCP_FLAG_PSH;

                if (tcp_flags & TCPHDR_URG)
                        ext->flags |= NFP_FL_TCP_FLAG_URG;
                if (tcp_flags_mask & TCPHDR_URG)
                        msk->flags |= NFP_FL_TCP_FLAG_URG;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_match_control match;

                flow_rule_match_control(rule, &match);
                if (match.key->flags & FLOW_DIS_IS_FRAGMENT)
                        ext->flags |= NFP_FL_IP_FRAGMENTED;
                if (match.mask->flags & FLOW_DIS_IS_FRAGMENT)
                        msk->flags |= NFP_FL_IP_FRAGMENTED;
                if (match.key->flags & FLOW_DIS_FIRST_FRAG)
                        ext->flags |= NFP_FL_IP_FRAG_FIRST;
                if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
                        msk->flags |= NFP_FL_IP_FRAG_FIRST;
        }
}

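/* Build the IPv4 section: addresses plus the shared IP extension fields. */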
static void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
                        struct nfp_flower_ipv4 *msk, struct flow_rule *rule)
{
        struct flow_match_ipv4_addrs match;

        memset(ext, 0, sizeof(struct nfp_flower_ipv4));
        memset(msk, 0, sizeof(struct nfp_flower_ipv4));

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
                flow_rule_match_ipv4_addrs(rule, &match);
                ext->ipv4_src = match.key->src;
                ext->ipv4_dst = match.key->dst;
                msk->ipv4_src = match.mask->src;
                msk->ipv4_dst = match.mask->dst;
        }

        nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
}

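/* Build the IPv6 section: addresses plus the shared IP extension fields. */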
static void
nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
                        struct nfp_flower_ipv6 *msk, struct flow_rule *rule)
{
        memset(ext, 0, sizeof(struct nfp_flower_ipv6));
        memset(msk, 0, sizeof(struct nfp_flower_ipv6));

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
                struct flow_match_ipv6_addrs match;

                flow_rule_match_ipv6_addrs(rule, &match);
                ext->ipv6_src = match.key->src;
                ext->ipv6_dst = match.key->dst;
                msk->ipv6_src = match.mask->src;
                msk->ipv6_dst = match.mask->dst;
        }

        nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
}

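/* Copy the raw Geneve option TLVs into the key and mask unchanged. */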
static int
nfp_flower_compile_geneve_opt(void *ext, void *msk, struct flow_rule *rule)
{
        struct flow_match_enc_opts match;

        flow_rule_match_enc_opts(rule, &match);
        memcpy(ext, match.key->data, match.key->len);
        memcpy(msk, match.mask->data, match.mask->len);

        return 0;
}

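/* Build the outer IPv4 source/destination addresses of a tunnel match. */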
static void
nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext,
                                  struct nfp_flower_tun_ipv4 *msk,
                                  struct flow_rule *rule)
{
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
                struct flow_match_ipv4_addrs match;

                flow_rule_match_enc_ipv4_addrs(rule, &match);
                ext->src = match.key->src;
                ext->dst = match.key->dst;
                msk->src = match.mask->src;
                msk->dst = match.mask->dst;
        }
}

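/* Build the outer IPv6 source/destination addresses of a tunnel match. */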
static void
nfp_flower_compile_tun_ipv6_addrs(struct nfp_flower_tun_ipv6 *ext,
                                  struct nfp_flower_tun_ipv6 *msk,
                                  struct flow_rule *rule)
{
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
                struct flow_match_ipv6_addrs match;

                flow_rule_match_enc_ipv6_addrs(rule, &match);
                ext->src = match.key->src;
                ext->dst = match.key->dst;
                msk->src = match.mask->src;
                msk->dst = match.mask->dst;
        }
}

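/* Build the outer IP TOS/TTL fields of a tunnel match. */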
static void
nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext,
                              struct nfp_flower_tun_ip_ext *msk,
                              struct flow_rule *rule)
{
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
                struct flow_match_ip match;

                flow_rule_match_enc_ip(rule, &match);
                ext->tos = match.key->tos;
                ext->ttl = match.key->ttl;
                msk->tos = match.mask->tos;
                msk->ttl = match.mask->ttl;
        }
}

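/* Encode the tunnel key id of a UDP tunnel (VXLAN/Geneve), shifting it
 * into the VNI position of the 32-bit tunnel id word.
 */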
static void
nfp_flower_compile_tun_udp_key(__be32 *key, __be32 *key_msk,
                               struct flow_rule *rule)
{
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_match_enc_keyid match;
                u32 vni;

                flow_rule_match_enc_keyid(rule, &match);
                vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET;
                *key = cpu_to_be32(vni);
                vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET;
                *key_msk = cpu_to_be32(vni);
        }
}

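/* Encode the GRE tunnel key and raise the key-present flag whenever the
 * rule matches on a tunnel key id.
 */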
static void
nfp_flower_compile_tun_gre_key(__be32 *key, __be32 *key_msk, __be16 *flags,
                               __be16 *flags_msk, struct flow_rule *rule)
{
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_match_enc_keyid match;

                flow_rule_match_enc_keyid(rule, &match);
                *key = match.key->keyid;
                *key_msk = match.mask->keyid;

                *flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
                *flags_msk = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
        }
}

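/* Build the complete IPv4-over-GRE tunnel section: outer addresses,
 * IP extension fields and the GRE key/flags.
 */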
static void
nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
                                struct nfp_flower_ipv4_gre_tun *msk,
                                struct flow_rule *rule)
{
        memset(ext, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
        memset(msk, 0, sizeof(struct nfp_flower_ipv4_gre_tun));

        /* NVGRE is the only supported GRE tunnel type */
        ext->ethertype = cpu_to_be16(ETH_P_TEB);
        msk->ethertype = cpu_to_be16(~0);

        nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
        nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
        nfp_flower_compile_tun_gre_key(&ext->tun_key, &msk->tun_key,
                                       &ext->tun_flags, &msk->tun_flags, rule);
}

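/* Build the complete IPv4 UDP tunnel (VXLAN/Geneve) section. */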
static void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
                                struct nfp_flower_ipv4_udp_tun *msk,
                                struct flow_rule *rule)
{
        memset(ext, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
        memset(msk, 0, sizeof(struct nfp_flower_ipv4_udp_tun));

        nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
        nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
        nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
}

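/* Build the complete IPv6 UDP tunnel (VXLAN/Geneve) section. */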
static void
nfp_flower_compile_ipv6_udp_tun(struct nfp_flower_ipv6_udp_tun *ext,
                                struct nfp_flower_ipv6_udp_tun *msk,
                                struct flow_rule *rule)
{
        memset(ext, 0, sizeof(struct nfp_flower_ipv6_udp_tun));
        memset(msk, 0, sizeof(struct nfp_flower_ipv6_udp_tun));

        nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
        nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
        nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
}

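/* Build the complete IPv6-over-GRE tunnel section. */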
static void
nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext,
                                struct nfp_flower_ipv6_gre_tun *msk,
                                struct flow_rule *rule)
{
        memset(ext, 0, sizeof(struct nfp_flower_ipv6_gre_tun));
        memset(msk, 0, sizeof(struct nfp_flower_ipv6_gre_tun));

        /* NVGRE is the only supported GRE tunnel type */
        ext->ethertype = cpu_to_be16(ETH_P_TEB);
        msk->ethertype = cpu_to_be16(~0);

        nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
        nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
        nfp_flower_compile_tun_gre_key(&ext->tun_key, &msk->tun_key,
                                       &ext->tun_flags, &msk->tun_flags, rule);
}

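/* Translate a TC flower classifier into the firmware match format. The
 * unmasked key and the mask are laid out section by section in the order
 * dictated by key_ls, with ext and msk walking the two buffers in step.
 *
 * As an illustrative example, a rule added with
 *   tc filter add dev <netdev> ingress protocol ip flower \
 *           ip_proto tcp dst_port 80 action drop
 * would populate the meta/TCI, port, MAC/MPLS, transport and IPv4
 * sections. Returns 0 on success or a negative errno otherwise.
 */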
int nfp_flower_compile_flow_match(struct nfp_app *app,
                                  struct flow_cls_offload *flow,
                                  struct nfp_fl_key_ls *key_ls,
                                  struct net_device *netdev,
                                  struct nfp_fl_payload *nfp_flow,
                                  enum nfp_flower_tun_type tun_type,
                                  struct netlink_ext_ack *extack)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
        u32 port_id;
        int err;
        u8 *ext;
        u8 *msk;

        port_id = nfp_flower_get_port_id_from_netdev(app, netdev);

        memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
        memset(nfp_flow->mask_data, 0, key_ls->key_size);

        ext = nfp_flow->unmasked_data;
        msk = nfp_flow->mask_data;

        nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
                                    (struct nfp_flower_meta_tci *)msk,
                                    rule, key_ls->key_layer);
        ext += sizeof(struct nfp_flower_meta_tci);
        msk += sizeof(struct nfp_flower_meta_tci);

        /* Populate Extended Metadata if Required. */
        if (NFP_FLOWER_LAYER_EXT_META & key_ls->key_layer) {
                nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)ext,
                                            key_ls->key_layer_two);
                nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
                                            key_ls->key_layer_two);
                ext += sizeof(struct nfp_flower_ext_meta);
                msk += sizeof(struct nfp_flower_ext_meta);
        }

        /* Populate Exact Port data. */
        err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
                                      port_id, false, tun_type, extack);
        if (err)
                return err;

        /* Populate Mask Port Data. */
        err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
                                      port_id, true, tun_type, extack);
        if (err)
                return err;

        ext += sizeof(struct nfp_flower_in_port);
        msk += sizeof(struct nfp_flower_in_port);

        if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
                nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
                                       (struct nfp_flower_mac_mpls *)msk,
                                       rule);
                ext += sizeof(struct nfp_flower_mac_mpls);
                msk += sizeof(struct nfp_flower_mac_mpls);
        }

        if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
                nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
                                         (struct nfp_flower_tp_ports *)msk,
                                         rule);
                ext += sizeof(struct nfp_flower_tp_ports);
                msk += sizeof(struct nfp_flower_tp_ports);
        }

        if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
                nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
                                        (struct nfp_flower_ipv4 *)msk,
                                        rule);
                ext += sizeof(struct nfp_flower_ipv4);
                msk += sizeof(struct nfp_flower_ipv4);
        }

        if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
                nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
                                        (struct nfp_flower_ipv6 *)msk,
                                        rule);
                ext += sizeof(struct nfp_flower_ipv6);
                msk += sizeof(struct nfp_flower_ipv6);
        }

        if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) {
                if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
                        struct nfp_flower_ipv6_gre_tun *gre_match;
                        struct nfp_ipv6_addr_entry *entry;
                        struct in6_addr *dst;

                        nfp_flower_compile_ipv6_gre_tun((void *)ext,
                                                        (void *)msk, rule);
                        gre_match = (struct nfp_flower_ipv6_gre_tun *)ext;
                        dst = &gre_match->ipv6.dst;
                        ext += sizeof(struct nfp_flower_ipv6_gre_tun);
                        msk += sizeof(struct nfp_flower_ipv6_gre_tun);

                        entry = nfp_tunnel_add_ipv6_off(app, dst);
                        if (!entry)
                                return -EOPNOTSUPP;

                        nfp_flow->nfp_tun_ipv6 = entry;
                } else {
                        __be32 dst;

                        nfp_flower_compile_ipv4_gre_tun((void *)ext,
                                                        (void *)msk, rule);
                        dst = ((struct nfp_flower_ipv4_gre_tun *)ext)->ipv4.dst;
                        ext += sizeof(struct nfp_flower_ipv4_gre_tun);
                        msk += sizeof(struct nfp_flower_ipv4_gre_tun);

                        /* Store the tunnel destination in the rule data.
                         * This must be present and be an exact match.
                         */
                        nfp_flow->nfp_tun_ipv4_addr = dst;
                        nfp_tunnel_add_ipv4_off(app, dst);
                }
        }

        if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
            key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
                if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
                        struct nfp_flower_ipv6_udp_tun *udp_match;
                        struct nfp_ipv6_addr_entry *entry;
                        struct in6_addr *dst;

                        nfp_flower_compile_ipv6_udp_tun((void *)ext,
                                                        (void *)msk, rule);
                        udp_match = (struct nfp_flower_ipv6_udp_tun *)ext;
                        dst = &udp_match->ipv6.dst;
                        ext += sizeof(struct nfp_flower_ipv6_udp_tun);
                        msk += sizeof(struct nfp_flower_ipv6_udp_tun);

                        entry = nfp_tunnel_add_ipv6_off(app, dst);
                        if (!entry)
                                return -EOPNOTSUPP;

                        nfp_flow->nfp_tun_ipv6 = entry;
                } else {
                        __be32 dst;

                        nfp_flower_compile_ipv4_udp_tun((void *)ext,
                                                        (void *)msk, rule);
                        dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ipv4.dst;
                        ext += sizeof(struct nfp_flower_ipv4_udp_tun);
                        msk += sizeof(struct nfp_flower_ipv4_udp_tun);

                        /* Store the tunnel destination in the rule data.
                         * This must be present and be an exact match.
                         */
                        nfp_flow->nfp_tun_ipv4_addr = dst;
                        nfp_tunnel_add_ipv4_off(app, dst);
                }

                if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
                        err = nfp_flower_compile_geneve_opt(ext, msk, rule);
                        if (err)
                                return err;
                }
        }

        return 0;
}