#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mpls.h>
#include <linux/sctp.h>
#include <linux/smp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/ipv6.h>
#include <net/mpls.h>
#include <net/ndisc.h>
#include <net/nsh.h>

#include "conntrack.h"
#include "datapath.h"
#include "flow.h"
#include "flow_netlink.h"
#include "vport.h"

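/* Translate a flow's last-used jiffies timestamp into a wall-clock time in
 * milliseconds: the current time minus how long the flow has been idle.
 */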
u64 ovs_flow_used_time(unsigned long flow_jiffies)
{
	struct timespec64 cur_ts;
	u64 cur_ms, idle_ms;

	ktime_get_ts64(&cur_ts);
	idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
	cur_ms = (u64)(u32)cur_ts.tv_sec * MSEC_PER_SEC +
		 cur_ts.tv_nsec / NSEC_PER_MSEC;

	return cur_ms - idle_ms;
}

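/* Keep the low 12 bits of the first 16-bit half of the TCP flag word (the
 * nine flag bits plus the reserved bits), in network byte order.
 */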
#define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))

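/* Account one packet against a flow's per-CPU statistics, allocating a
 * CPU-local stats block on demand and falling back to the pre-allocated
 * CPU 0 block when allocation is not possible.
 */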
void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
			   const struct sk_buff *skb)
{
	struct sw_flow_stats *stats;
	unsigned int cpu = smp_processor_id();
	int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);

	stats = rcu_dereference(flow->stats[cpu]);

	/* Check if already have CPU-specific stats. */
	if (likely(stats)) {
		spin_lock(&stats->lock);
		/* Mark if we write on the pre-allocated stats. */
		if (cpu == 0 && unlikely(flow->stats_last_writer != cpu))
			flow->stats_last_writer = cpu;
	} else {
		stats = rcu_dereference(flow->stats[0]); /* Pre-allocated. */
		spin_lock(&stats->lock);

		/* If the current stats is the last writer on this CPU
		 * then try to allocate a new stats before changing the
		 * last writer.
		 */
		if (unlikely(flow->stats_last_writer != cpu)) {
			/* A previous locker may have already allocated the
			 * stats, so we need to check again.  If CPU-specific
			 * stats were already allocated, we update the
			 * pre-allocated stats as we have already locked it.
			 */
			if (likely(flow->stats_last_writer != -1) &&
			    likely(!rcu_access_pointer(flow->stats[cpu]))) {
				/* Try to allocate CPU-specific stats. */
				struct sw_flow_stats *new_stats;

				new_stats =
					kmem_cache_alloc_node(flow_stats_cache,
							      GFP_NOWAIT |
							      __GFP_THISNODE |
							      __GFP_NOWARN |
							      __GFP_NOMEMALLOC,
							      numa_node_id());
				if (likely(new_stats)) {
					new_stats->used = jiffies;
					new_stats->packet_count = 1;
					new_stats->byte_count = len;
					new_stats->tcp_flags = tcp_flags;
					spin_lock_init(&new_stats->lock);

					rcu_assign_pointer(flow->stats[cpu],
							   new_stats);
					cpumask_set_cpu(cpu, &flow->cpu_used_mask);
					goto unlock;
				}
			}
			flow->stats_last_writer = cpu;
		}
	}

	stats->used = jiffies;
	stats->packet_count++;
	stats->byte_count += len;
	stats->tcp_flags |= tcp_flags;
unlock:
	spin_unlock(&stats->lock);
}

/* Must be called with rcu_read_lock or ovs_mutex. */
void ovs_flow_stats_get(const struct sw_flow *flow,
			struct ovs_flow_stats *ovs_stats,
			unsigned long *used, __be16 *tcp_flags)
{
	int cpu;

	*used = 0;
	*tcp_flags = 0;
	memset(ovs_stats, 0, sizeof(*ovs_stats));

	/* We open code this to make sure cpu 0 is always considered. */
	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
		struct sw_flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);

		if (stats) {
			/* Local CPU may write on non-local stats, so we must
			 * block bottom-halves here.
			 */
			spin_lock_bh(&stats->lock);
			if (!*used || time_after(stats->used, *used))
				*used = stats->used;
			*tcp_flags |= stats->tcp_flags;
			ovs_stats->n_packets += stats->packet_count;
			ovs_stats->n_bytes += stats->byte_count;
			spin_unlock_bh(&stats->lock);
		}
	}
}

/* Called with ovs_mutex. */
void ovs_flow_stats_clear(struct sw_flow *flow)
{
	int cpu;

	/* We open code this to make sure cpu 0 is always considered. */
	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
		struct sw_flow_stats *stats = ovsl_dereference(flow->stats[cpu]);

		if (stats) {
			spin_lock_bh(&stats->lock);
			stats->used = 0;
			stats->packet_count = 0;
			stats->byte_count = 0;
			stats->tcp_flags = 0;
			spin_unlock_bh(&stats->lock);
		}
	}
}

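/* Make sure at least @len bytes of header are available in the skb's linear
 * data area, pulling them in from fragments if necessary.
 */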
static int check_header(struct sk_buff *skb, int len)
{
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;
	return 0;
}

static bool arphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_network_offset(skb) +
				  sizeof(struct arp_eth_header));
}

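/* Validate the IPv4 header length and set the skb's transport header to just
 * past it.  Returns 0 on success or a negative errno value.
 */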
static int check_iphdr(struct sk_buff *skb)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int ip_len;
	int err;

	err = check_header(skb, nh_ofs + sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	ip_len = ip_hdrlen(skb);
	if (unlikely(ip_len < sizeof(struct iphdr) ||
		     skb->len < nh_ofs + ip_len))
		return -EINVAL;

	skb_set_transport_header(skb, nh_ofs + ip_len);
	return 0;
}

static bool tcphdr_ok(struct sk_buff *skb)
{
	int th_ofs = skb_transport_offset(skb);
	int tcp_len;

	if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
		return false;

	tcp_len = tcp_hdrlen(skb);
	if (unlikely(tcp_len < sizeof(struct tcphdr) ||
		     skb->len < th_ofs + tcp_len))
		return false;

	return true;
}

static bool udphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
}

static bool sctphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct sctphdr));
}

static bool icmphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmphdr));
}

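/* Parse the IPv6 header and its extension headers into the IPv6 fields of
 * @key and point the skb's transport header past them.  Returns the network
 * header length on success, or a negative errno value on a malformed packet.
 */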
static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
{
	unsigned short frag_off;
	unsigned int payload_ofs = 0;
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int nh_len;
	struct ipv6hdr *nh;
	int err, nexthdr, flags = 0;

	err = check_header(skb, nh_ofs + sizeof(*nh));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	key->ip.proto = NEXTHDR_NONE;
	key->ip.tos = ipv6_get_dsfield(nh);
	key->ip.ttl = nh->hop_limit;
	key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	key->ipv6.addr.src = nh->saddr;
	key->ipv6.addr.dst = nh->daddr;

	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
	if (flags & IP6_FH_F_FRAG) {
		if (frag_off) {
			key->ip.frag = OVS_FRAG_TYPE_LATER;
			key->ip.proto = nexthdr;
			return 0;
		}
		key->ip.frag = OVS_FRAG_TYPE_FIRST;
	} else {
		key->ip.frag = OVS_FRAG_TYPE_NONE;
	}

	/* Delayed handling of error in ipv6_find_hdr() as it
	 * always sets flags and frag_off to a valid value which may be
	 * used to set key->ip.frag above.
	 */
	if (unlikely(nexthdr < 0))
		return -EPROTO;

	nh_len = payload_ofs - nh_ofs;
	skb_set_transport_header(skb, nh_ofs + nh_len);
	key->ip.proto = nexthdr;
	return nh_len;
}

static bool icmp6hdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmp6hdr));
}

/**
 * parse_vlan_tag - Parse a vlan tag from the vlan header.
 * @skb: skb containing frame to parse
 * @key_vh: pointer to parsed vlan tag
 * @untag_vlan: should the vlan header be removed from the frame
 *
 * Return: a negative errno on memory error,
 * %0 if it encounters a non-vlan or incomplete packet,
 * %1 after successfully parsing the vlan tag.
 */
static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh,
			  bool untag_vlan)
{
	struct vlan_head *vh = (struct vlan_head *)skb->data;

	if (likely(!eth_type_vlan(vh->tpid)))
		return 0;

	if (unlikely(skb->len < sizeof(struct vlan_head) + sizeof(__be16)))
		return 0;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct vlan_head) +
				    sizeof(__be16))))
		return -ENOMEM;

	vh = (struct vlan_head *)skb->data;
	key_vh->tci = vh->tci | htons(VLAN_CFI_MASK);
	key_vh->tpid = vh->tpid;

	if (unlikely(untag_vlan)) {
		int offset = skb->data - skb_mac_header(skb);
		u16 tci;
		int err;

		__skb_push(skb, offset);
		err = __skb_vlan_pop(skb, &tci);
		__skb_pull(skb, offset);
		if (err)
			return err;
		__vlan_hwaccel_put_tag(skb, key_vh->tpid, tci);
	} else {
		__skb_pull(skb, sizeof(struct vlan_head));
	}
	return 1;
}

static void clear_vlan(struct sw_flow_key *key)
{
	key->eth.vlan.tci = 0;
	key->eth.vlan.tpid = 0;
	key->eth.cvlan.tci = 0;
	key->eth.cvlan.tpid = 0;
}

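/* Extract the outer (and, if present, inner) VLAN tags into @key.  The outer
 * tag may come from the skb's hardware-accelerated tag or from the packet
 * data itself.
 */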
static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int res;

	if (skb_vlan_tag_present(skb)) {
		key->eth.vlan.tci = htons(skb->vlan_tci) | htons(VLAN_CFI_MASK);
		key->eth.vlan.tpid = skb->vlan_proto;
	} else {
		/* Parse outer vlan tag in the non-accelerated case. */
		res = parse_vlan_tag(skb, &key->eth.vlan, true);
		if (res <= 0)
			return res;
	}

	/* Parse inner vlan tag. */
	res = parse_vlan_tag(skb, &key->eth.cvlan, false);
	if (res <= 0)
		return res;

	return 0;
}

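/* Determine the Ethertype of the frame at skb->data, handling 802.3 frames
 * with LLC/SNAP headers.  Returns htons(ETH_P_802_2) for non-SNAP 802.3
 * frames and htons(0) if the header cannot be pulled.
 */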
static __be16 parse_ethertype(struct sk_buff *skb)
{
	struct llc_snap_hdr {
		u8  dsap;
		u8  ssap;
		u8  ctrl;
		u8  oui[3];
		__be16 ethertype;
	};
	struct llc_snap_hdr *llc;
	__be16 proto;

	proto = *(__be16 *) skb->data;
	__skb_pull(skb, sizeof(__be16));

	if (eth_proto_is_802_3(proto))
		return proto;

	if (skb->len < sizeof(struct llc_snap_hdr))
		return htons(ETH_P_802_2);

	if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
		return htons(0);

	llc = (struct llc_snap_hdr *) skb->data;
	if (llc->dsap != LLC_SAP_SNAP ||
	    llc->ssap != LLC_SAP_SNAP ||
	    (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
		return htons(ETH_P_802_2);

	__skb_pull(skb, sizeof(struct llc_snap_hdr));

	if (eth_proto_is_802_3(llc->ethertype))
		return llc->ethertype;

	return htons(ETH_P_802_2);
}

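/* Extract the ICMPv6 type/code and, for neighbour discovery messages, the
 * target address and link-layer address options into @key.
 */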
static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
			int nh_len)
{
	struct icmp6hdr *icmp = icmp6_hdr(skb);

	/* The ICMPv6 type and code fields use the 16-bit transport port
	 * fields, so we need to store them in 16-bit network byte order.
	 */
	key->tp.src = htons(icmp->icmp6_type);
	key->tp.dst = htons(icmp->icmp6_code);
	memset(&key->ipv6.nd, 0, sizeof(key->ipv6.nd));

	if (icmp->icmp6_code == 0 &&
	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
	     icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
		int icmp_len = skb->len - skb_transport_offset(skb);
		struct nd_msg *nd;
		int offset;

		/* In order to process neighbor discovery options, we need the
		 * entire packet.
		 */
		if (unlikely(icmp_len < sizeof(*nd)))
			return 0;

		if (unlikely(skb_linearize(skb)))
			return -ENOMEM;

		nd = (struct nd_msg *)skb_transport_header(skb);
		key->ipv6.nd.target = nd->target;

		icmp_len -= sizeof(*nd);
		offset = 0;
		while (icmp_len >= 8) {
			struct nd_opt_hdr *nd_opt =
				 (struct nd_opt_hdr *)(nd->opt + offset);
			int opt_len = nd_opt->nd_opt_len * 8;

			if (unlikely(!opt_len || opt_len > icmp_len))
				return 0;

			/* Store the link layer address if the appropriate
			 * option is provided.  It is considered an error if
			 * the same link layer option is specified twice.
			 */
			if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
			    && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
					goto invalid;
				ether_addr_copy(key->ipv6.nd.sll,
						&nd->opt[offset+sizeof(*nd_opt)]);
			} else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
				   && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
					goto invalid;
				ether_addr_copy(key->ipv6.nd.tll,
						&nd->opt[offset+sizeof(*nd_opt)]);
			}

			icmp_len -= opt_len;
			offset += opt_len;
		}
	}

	return 0;

invalid:
	memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
	memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
	memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));

	return 0;
}

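/* Extract the NSH (Network Service Header) base header and, for MD type 1,
 * the fixed-length context headers into @key.
 */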
static int parse_nsh(struct sk_buff *skb, struct sw_flow_key *key)
{
	struct nshhdr *nh;
	unsigned int nh_ofs = skb_network_offset(skb);
	u8 version, length;
	int err;

	err = check_header(skb, nh_ofs + NSH_BASE_HDR_LEN);
	if (unlikely(err))
		return err;

	nh = nsh_hdr(skb);
	version = nsh_get_ver(nh);
	length = nsh_hdr_len(nh);

	if (version != 0)
		return -EINVAL;

	err = check_header(skb, nh_ofs + length);
	if (unlikely(err))
		return err;

	nh = nsh_hdr(skb);
	key->nsh.base.flags = nsh_get_flags(nh);
	key->nsh.base.ttl = nsh_get_ttl(nh);
	key->nsh.base.mdtype = nh->mdtype;
	key->nsh.base.np = nh->np;
	key->nsh.base.path_hdr = nh->path_hdr;
	switch (key->nsh.base.mdtype) {
	case NSH_M_TYPE1:
		if (length != NSH_M_TYPE1_LEN)
			return -EINVAL;
		memcpy(key->nsh.context, nh->md1.context,
		       sizeof(nh->md1));
		break;
	case NSH_M_TYPE2:
		memset(key->nsh.context, 0,
		       sizeof(nh->md1));
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * key_extract_l3l4 - extracts L3/L4 header information.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 *       L3 header
 * @key: output flow key
 *
 * Return: %0 if successful, otherwise a negative errno value.
 */
static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
{
	int error;

	/* Network layer. */
	if (key->eth.type == htons(ETH_P_IP)) {
		struct iphdr *nh;
		__be16 offset;

		error = check_iphdr(skb);
		if (unlikely(error)) {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv4, 0, sizeof(key->ipv4));
			if (error == -EINVAL) {
				skb->transport_header = skb->network_header;
				error = 0;
			}
			return error;
		}

		nh = ip_hdr(skb);
		key->ipv4.addr.src = nh->saddr;
		key->ipv4.addr.dst = nh->daddr;

		key->ip.proto = nh->protocol;
		key->ip.tos = nh->tos;
		key->ip.ttl = nh->ttl;

		offset = nh->frag_off & htons(IP_OFFSET);
		if (offset) {
			key->ip.frag = OVS_FRAG_TYPE_LATER;
			memset(&key->tp, 0, sizeof(key->tp));
			return 0;
		}
		if (nh->frag_off & htons(IP_MF) ||
		    skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;
		else
			key->ip.frag = OVS_FRAG_TYPE_NONE;

		/* Transport layer. */
		if (key->ip.proto == IPPROTO_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->tp.src = tcp->source;
				key->tp.dst = tcp->dest;
				key->tp.flags = TCP_FLAGS_BE16(tcp);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}

		} else if (key->ip.proto == IPPROTO_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->tp.src = udp->source;
				key->tp.dst = udp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);
				key->tp.src = sctp->source;
				key->tp.dst = sctp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_ICMP) {
			if (icmphdr_ok(skb)) {
				struct icmphdr *icmp = icmp_hdr(skb);
				/* The ICMP type and code fields use the 16-bit
				 * transport port fields, so we need to store
				 * them in 16-bit network byte order.
				 */
				key->tp.src = htons(icmp->type);
				key->tp.dst = htons(icmp->code);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		}

	} else if (key->eth.type == htons(ETH_P_ARP) ||
		   key->eth.type == htons(ETH_P_RARP)) {
		struct arp_eth_header *arp;
		bool arp_available = arphdr_ok(skb);

		arp = (struct arp_eth_header *)skb_network_header(skb);

		if (arp_available &&
		    arp->ar_hrd == htons(ARPHRD_ETHER) &&
		    arp->ar_pro == htons(ETH_P_IP) &&
		    arp->ar_hln == ETH_ALEN &&
		    arp->ar_pln == 4) {

			/* We only match on the lower 8 bits of the opcode. */
			if (ntohs(arp->ar_op) <= 0xff)
				key->ip.proto = ntohs(arp->ar_op);
			else
				key->ip.proto = 0;

			memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
			memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
			ether_addr_copy(key->ipv4.arp.sha, arp->ar_sha);
			ether_addr_copy(key->ipv4.arp.tha, arp->ar_tha);
		} else {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv4, 0, sizeof(key->ipv4));
		}
	} else if (eth_p_mpls(key->eth.type)) {
		u8 label_count = 1;

		memset(&key->mpls, 0, sizeof(key->mpls));
		skb_set_inner_network_header(skb, skb->mac_len);
		while (1) {
			__be32 lse;

			error = check_header(skb, skb->mac_len +
					     label_count * MPLS_HLEN);
			if (unlikely(error))
				return 0;

			memcpy(&lse, skb_inner_network_header(skb), MPLS_HLEN);

			if (label_count <= MPLS_LABEL_DEPTH)
				memcpy(&key->mpls.lse[label_count - 1], &lse,
				       MPLS_HLEN);

			skb_set_inner_network_header(skb, skb->mac_len +
						     label_count * MPLS_HLEN);
			if (lse & htonl(MPLS_LS_S_MASK))
				break;

			label_count++;
		}
		if (label_count > MPLS_LABEL_DEPTH)
			label_count = MPLS_LABEL_DEPTH;

		key->mpls.num_labels_mask = GENMASK(label_count - 1, 0);
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		int nh_len;

		nh_len = parse_ipv6hdr(skb, key);
		if (unlikely(nh_len < 0)) {
			switch (nh_len) {
			case -EINVAL:
				memset(&key->ip, 0, sizeof(key->ip));
				memset(&key->ipv6.addr, 0, sizeof(key->ipv6.addr));
				fallthrough;
			case -EPROTO:
				skb->transport_header = skb->network_header;
				error = 0;
				break;
			default:
				error = nh_len;
			}
			return error;
		}

		if (key->ip.frag == OVS_FRAG_TYPE_LATER) {
			memset(&key->tp, 0, sizeof(key->tp));
			return 0;
		}
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == NEXTHDR_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->tp.src = tcp->source;
				key->tp.dst = tcp->dest;
				key->tp.flags = TCP_FLAGS_BE16(tcp);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->tp.src = udp->source;
				key->tp.dst = udp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);
				key->tp.src = sctp->source;
				key->tp.dst = sctp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_ICMP) {
			if (icmp6hdr_ok(skb)) {
				error = parse_icmpv6(skb, key, nh_len);
				if (error)
					return error;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		}
	} else if (key->eth.type == htons(ETH_P_NSH)) {
		error = parse_nsh(skb, key);
		if (error)
			return error;
	}
	return 0;
}

/**
 * key_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 *       Ethernet header
 * @key: output flow key
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Initializes @skb header fields as follows:
 *
 *    - skb->mac_header: the L2 header.
 *
 *    - skb->mac_len: the L2 header length.
 *
 *    - skb->network_header: just past the L2 header, or just past the
 *      VLAN header, to the first byte of the L2 payload.
 *
 *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
 *      on output, then just past the IP header, if one is present and
 *      of a correct length, otherwise the same as skb->network_header.
 *      For other key->eth.type values it is left untouched.
 *
 *    - skb->protocol: the type of the data starting at skb->network_header.
 *      Equals to key->eth.type.
 *
 * Return: %0 if successful, otherwise a negative errno value.
 */
static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
{
	struct ethhdr *eth;

	/* Flags are always used as part of stats. */
	key->tp.flags = 0;

	skb_reset_mac_header(skb);

	/* Link layer. */
	clear_vlan(key);
	if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
		if (unlikely(eth_type_vlan(skb->protocol)))
			return -EINVAL;

		skb_reset_network_header(skb);
		key->eth.type = skb->protocol;
	} else {
		eth = eth_hdr(skb);
		ether_addr_copy(key->eth.src, eth->h_source);
		ether_addr_copy(key->eth.dst, eth->h_dest);

		__skb_pull(skb, 2 * ETH_ALEN);
		/* We are going to push all headers that we pull, so no need to
		 * update skb->csum here.
		 */

		if (unlikely(parse_vlan(skb, key)))
			return -ENOMEM;

		key->eth.type = parse_ethertype(skb);
		if (unlikely(key->eth.type == htons(0)))
			return -ENOMEM;

		/* Multiple tagged packets need to retain TPID to satisfy
		 * skb_vlan_pop(), which will later shift the ethertype into
		 * skb->protocol.
		 */
		if (key->eth.cvlan.tci & htons(VLAN_CFI_MASK))
			skb->protocol = key->eth.cvlan.tpid;
		else
			skb->protocol = key->eth.type;

		skb_reset_network_header(skb);
		__skb_push(skb, skb->data - skb_mac_header(skb));
	}

	skb_reset_mac_len(skb);

	/* Fill out L3/L4 fields. */
	return key_extract_l3l4(skb, key);
}

/* Re-extract only the L3 and L4 fields of @key from the packet; the L2
 * fields and metadata are left untouched.
 */
int ovs_flow_key_update_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
{
	return key_extract_l3l4(skb, key);
}

int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
{
	int res;

	res = key_extract(skb, key);
	if (!res)
		key->mac_proto &= ~SW_FLOW_KEY_INVALID;

	return res;
}

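/* Deduce the packet's mac_proto from the type of the receiving device:
 * Ethernet devices and ARPHRD_NONE devices carrying ETH_P_TEB frames have an
 * Ethernet header; everything else is treated as L3-only.
 */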
static int key_extract_mac_proto(struct sk_buff *skb)
{
	switch (skb->dev->type) {
	case ARPHRD_ETHER:
		return MAC_PROTO_ETHERNET;
	case ARPHRD_NONE:
		if (skb->protocol == htons(ETH_P_TEB))
			return MAC_PROTO_ETHERNET;
		return MAC_PROTO_NONE;
	}
	WARN_ON_ONCE(1);
	return -EINVAL;
}

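/* Build the complete flow key for a received packet: tunnel metadata (if
 * any), input port and other phy metadata, then the L2/L3/L4 fields via
 * key_extract().
 */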
int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
			 struct sk_buff *skb, struct sw_flow_key *key)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	struct tc_skb_ext *tc_ext;
#endif
	bool post_ct = false;
	int res, err;

	/* Extract metadata from packet. */
	if (tun_info) {
		key->tun_proto = ip_tunnel_info_af(tun_info);
		memcpy(&key->tun_key, &tun_info->key, sizeof(key->tun_key));

		if (tun_info->options_len) {
			BUILD_BUG_ON((1 << (sizeof(tun_info->options_len) *
						   8)) - 1
					> sizeof(key->tun_opts));

			ip_tunnel_info_opts_get(TUN_METADATA_OPTS(key, tun_info->options_len),
						tun_info);
			key->tun_opts_len = tun_info->options_len;
		} else {
			key->tun_opts_len = 0;
		}
	} else {
		key->tun_proto = 0;
		key->tun_opts_len = 0;
		memset(&key->tun_key, 0, sizeof(key->tun_key));
	}

	key->phy.priority = skb->priority;
	key->phy.in_port = OVS_CB(skb)->input_vport->port_no;
	key->phy.skb_mark = skb->mark;
	key->ovs_flow_hash = 0;
	res = key_extract_mac_proto(skb);
	if (res < 0)
		return res;
	key->mac_proto = res;

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	if (static_branch_unlikely(&tc_recirc_sharing_support)) {
		tc_ext = skb_ext_find(skb, TC_SKB_EXT);
		key->recirc_id = tc_ext ? tc_ext->chain : 0;
		OVS_CB(skb)->mru = tc_ext ? tc_ext->mru : 0;
		post_ct = tc_ext ? tc_ext->post_ct : false;
	} else {
		key->recirc_id = 0;
	}
#else
	key->recirc_id = 0;
#endif

	err = key_extract(skb, key);
	if (!err)
		ovs_ct_fill_key(skb, key, post_ct);
	return err;
}

int ovs_flow_key_extract_userspace(struct net *net, const struct nlattr *attr,
				   struct sk_buff *skb,
				   struct sw_flow_key *key, bool log)
{
	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
	u64 attrs = 0;
	int err;

	err = parse_flow_nlattrs(attr, a, &attrs, log);
	if (err)
		return -EINVAL;

	/* Extract metadata. */
	err = ovs_nla_get_flow_metadata(net, a, attrs, key, log);
	if (err)
		return err;

	/* key_extract assumes that skb->protocol is set up for layer 3
	 * packets, which is the case for other callers, in particular
	 * packets received from the network stack.  Here the correct value
	 * can be set from the metadata extracted above.
	 * For an L2 packet the key's eth type would be zero; skb->protocol
	 * is then set to the correct value later during key_extract.
	 */
	skb->protocol = key->eth.type;
	err = key_extract(skb, key);
	if (err)
		return err;

	/* Check that we have conntrack original direction tuple metadata only
	 * for packets for which it makes sense.  Otherwise the key may be
	 * corrupted due to overlapping key fields.
	 */
	if (attrs & (1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4) &&
	    key->eth.type != htons(ETH_P_IP))
		return -EINVAL;
	if (attrs & (1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6) &&
	    (key->eth.type != htons(ETH_P_IPV6) ||
	     sw_flow_key_is_nd(key)))
		return -EINVAL;

	return 0;
}