#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include "vlan.h"

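/* vlan_do_receive - deliver a VLAN tagged frame to its VLAN device
 *
 * Called on the receive path for frames whose VLAN tag is stored in
 * skb->vlan_tci.  Looks up the VLAN device configured for that VID on the
 * receiving device; if none exists, returns false (marking frames with a
 * foreign VID as PACKET_OTHERHOST).  Otherwise the skb is retargeted to
 * the VLAN device, pkt_type is fixed up against the VLAN device's MAC,
 * the tag is re-inserted into the packet data when REORDER_HDR is off,
 * the tag priority is mapped into skb->priority, vlan_tci is cleared and
 * the per-CPU RX statistics are updated.
 */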
bool vlan_do_receive(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_id);
	if (!vlan_dev) {
		if (vlan_id)
			skb->pkt_type = PACKET_OTHERHOST;
		return false;
	}

	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	skb->dev = vlan_dev;
	if (skb->pkt_type == PACKET_OTHERHOST) {
		/* The lower layer thinks this frame is not for us.  A VLAN
		 * device may use a different MAC than its real device, so
		 * check the destination address against the VLAN device
		 * before giving up on the frame.
		 */
		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
					vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
	}

	if (!(vlan_dev_info(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
		unsigned int offset = skb->data - skb_mac_header(skb);

		/* vlan_insert_tag() expects skb->data to point at the mac
		 * header, so push back to it before re-inserting the tag
		 * and restore the original offset afterwards.
		 */
		skb_push(skb, offset);
		skb = *skbp = vlan_insert_tag(skb, skb->vlan_tci);
		if (!skb)
			return false;
		skb_pull(skb, offset + VLAN_HLEN);
		skb_reset_mac_len(skb);
	}

	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	skb->vlan_tci = 0;

	rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_pcpu_stats);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	if (skb->pkt_type == PACKET_MULTICAST)
		rx_stats->rx_multicast++;
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}
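/* __vlan_find_dev_deep - find the VLAN device for a VID on a real device
 *
 * Must be called with rcu_read_lock held or with RTNL, since the vlan
 * group is accessed via rcu_dereference_rtnl().  Bonding slaves carry no
 * vlan group of their own, so the lookup is retried on the bonding master.
 */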
struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
					u16 vlan_id)
{
	struct vlan_group *grp = rcu_dereference_rtnl(real_dev->vlgrp);

	if (grp) {
		return vlan_group_get_device(grp, vlan_id);
	} else {
		/* Bonding slaves do not get a vlan_group of their own; the
		 * group is assigned to the bonding master, so retry the
		 * lookup there.
		 */
		if (netif_is_bond_slave(real_dev))
			return __vlan_find_dev_deep(real_dev->master, vlan_id);
	}

	return NULL;
}
EXPORT_SYMBOL(__vlan_find_dev_deep);

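/* vlan_dev_real_dev - return the underlying (real) device of a VLAN device */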
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	return vlan_dev_info(dev)->real_dev;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

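/* vlan_dev_vlan_id - return the VLAN ID of a VLAN device */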
u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	return vlan_dev_info(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);

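/* vlan_reorder_header - make a tagged frame look untagged
 *
 * Moves the Ethernet destination and source addresses up by VLAN_HLEN so
 * that, followed by the encapsulated protocol field, the header reads as
 * an ordinary untagged Ethernet header.  skb_cow() makes the header
 * writable first; returns NULL on failure.
 */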
static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
{
	if (skb_cow(skb, skb_headroom(skb)) < 0)
		return NULL;
	memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
	skb->mac_header += VLAN_HLEN;
	skb_reset_mac_len(skb);
	return skb;
}

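/* vlan_set_encap_proto - set skb->protocol from the encapsulated protocol
 *
 * EtherType values (>= 1536) are used as-is.  Smaller values mean the
 * frame is length-encoded 802.3: a leading 0xFFFF marks raw 802.3 IPX,
 * anything else is treated as 802.2 LLC.
 */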
static void vlan_set_encap_proto(struct sk_buff *skb, struct vlan_hdr *vhdr)
{
	__be16 proto;
	unsigned char *rawp;

	/* The frame was VLAN tagged; grab the encapsulated protocol, which
	 * is what the layer three code cares about.
	 */
	proto = vhdr->h_vlan_encapsulated_proto;
	if (ntohs(proto) >= 1536) {
		skb->protocol = proto;
		return;
	}

	rawp = skb->data;
	if (*(unsigned short *) rawp == 0xFFFF)
		/* Old Novell IPX runs directly over 802.3 without an 802.2
		 * LLC header; 0xFFFF here is the raw IPX checksum field and
		 * is never a valid SSAP/DSAP pair, so treat the frame as
		 * raw 802.3.
		 */
		skb->protocol = htons(ETH_P_802_3);
	else
		/* Real 802.2 LLC. */
		skb->protocol = htons(ETH_P_802_2);
}

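/* vlan_untag - software untagging of a VLAN frame
 *
 * For frames whose tag is still in the packet data (no hardware
 * acceleration): copy the TCI into skb->vlan_tci, pull the VLAN header,
 * fix up skb->protocol and move the MAC addresses so the frame looks
 * untagged.  Returns NULL and frees the skb on error.
 */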
struct sk_buff *vlan_untag(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr;
	u16 vlan_tci;

	if (unlikely(vlan_tx_tag_present(skb))) {
		/* vlan_tci was already set up by hardware accel, nothing to do */
		return skb;
	}

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *) skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	__vlan_hwaccel_put_tag(skb, vlan_tci);

	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	skb = vlan_reorder_header(skb);
	if (unlikely(!skb))
		goto err_free;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}