/*
 * Open vSwitch netdev vport: attaches ordinary network devices to the
 * datapath via an rx handler.
 *
 * NOTE(review): the original license/copyright header was lost in
 * extraction (only stray line numbers remained here) — restore the
 * upstream GPL notice from net/openvswitch/vport-netdev.c.
 */
19#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
21#include <linux/if_arp.h>
22#include <linux/if_bridge.h>
23#include <linux/if_vlan.h>
24#include <linux/kernel.h>
25#include <linux/llc.h>
26#include <linux/rtnetlink.h>
27#include <linux/skbuff.h>
28#include <linux/openvswitch.h>
29#include <linux/export.h>
30
31#include <net/ip_tunnels.h>
32#include <net/rtnetlink.h>
33
34#include "datapath.h"
35#include "vport.h"
36#include "vport-internal_dev.h"
37#include "vport-netdev.h"
38
39static struct vport_ops ovs_netdev_vport_ops;
40
41
/* Hand a received frame to the datapath.  Called from the rx handler
 * path, i.e. with rcu_read_lock held and bottom halves disabled.
 * Consumes @skb on every path.
 */
static void netdev_port_receive(struct sk_buff *skb)
{
	struct vport *vport;

	vport = ovs_netdev_get_vport(skb->dev);
	if (unlikely(!vport))
		goto error;

	/* LRO-merged skbs cannot be forwarded; warn and drop. */
	if (unlikely(skb_warn_if_lro(skb)))
		goto error;

	/* Make our own copy of the packet so we don't mangle data that
	 * other taps on this device (e.g. AF_PACKET) may still reference.
	 * On failure skb_share_check() has already freed the original.
	 */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return;

	/* The rx handler sees the skb with the Ethernet header already
	 * pulled; push it back (and fix up the rx checksum) because the
	 * flow pipeline expects to start at the link-layer header.
	 */
	skb_push(skb, ETH_HLEN);
	ovs_skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
	ovs_vport_receive(vport, skb, skb_tunnel_info(skb));
	return;
error:
	kfree_skb(skb);
}
67
68
69static rx_handler_result_t netdev_frame_hook(struct sk_buff **pskb)
70{
71 struct sk_buff *skb = *pskb;
72
73 if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
74 return RX_HANDLER_PASS;
75
76 netdev_port_receive(skb);
77 return RX_HANDLER_CONSUMED;
78}
79
80static struct net_device *get_dpdev(const struct datapath *dp)
81{
82 struct vport *local;
83
84 local = ovs_vport_ovsl(dp, OVSP_LOCAL);
85 BUG_ON(!local);
86 return local->dev;
87}
88
/* Attach the existing network device named @name to the datapath as
 * @vport: take a reference on the device, link it under the datapath's
 * local device, register our rx handler and enable promiscuous mode.
 *
 * Returns @vport on success or ERR_PTR() on failure.  On failure the
 * vport is freed here — callers must not touch it afterwards.
 */
struct vport *ovs_netdev_link(struct vport *vport, const char *name)
{
	int err;

	vport->dev = dev_get_by_name(ovs_dp_get_net(vport->dp), name);
	if (!vport->dev) {
		err = -ENODEV;
		goto error_free_vport;
	}

	/* Refuse loopback and non-Ethernet devices, and our own internal
	 * devices — attaching those would create forwarding loops.
	 */
	if (vport->dev->flags & IFF_LOOPBACK ||
	    vport->dev->type != ARPHRD_ETHER ||
	    ovs_is_internal_dev(vport->dev)) {
		err = -EINVAL;
		goto error_put;
	}

	rtnl_lock();
	err = netdev_master_upper_dev_link(vport->dev,
					   get_dpdev(vport->dp));
	if (err)
		goto error_unlock;

	err = netdev_rx_handler_register(vport->dev, netdev_frame_hook,
					 vport);
	if (err)
		goto error_master_upper_dev_unlink;

	dev_disable_lro(vport->dev);
	dev_set_promiscuity(vport->dev, 1);
	/* Setting IFF_OVS_DATAPATH last makes ovs_netdev_get_vport()
	 * return the vport only once everything above is in place.
	 */
	vport->dev->priv_flags |= IFF_OVS_DATAPATH;
	rtnl_unlock();

	return vport;

	/* Unwind in reverse order of acquisition. */
error_master_upper_dev_unlink:
	netdev_upper_dev_unlink(vport->dev, get_dpdev(vport->dp));
error_unlock:
	rtnl_unlock();
error_put:
	dev_put(vport->dev);
error_free_vport:
	ovs_vport_free(vport);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(ovs_netdev_link);
134EXPORT_SYMBOL_GPL(ovs_netdev_link);
135
136static struct vport *netdev_create(const struct vport_parms *parms)
137{
138 struct vport *vport;
139
140 vport = ovs_vport_alloc(0, &ovs_netdev_vport_ops, parms);
141 if (IS_ERR(vport))
142 return vport;
143
144 return ovs_netdev_link(vport, parms->name);
145}
146
147static void vport_netdev_free(struct rcu_head *rcu)
148{
149 struct vport *vport = container_of(rcu, struct vport, rcu);
150
151 if (vport->dev)
152 dev_put(vport->dev);
153 ovs_vport_free(vport);
154}
155
/* Detach @vport's device from the datapath, reversing ovs_netdev_link():
 * clear IFF_OVS_DATAPATH first so the rx fast path stops resolving the
 * vport, then unregister the rx handler, unlink from the master device
 * and drop promiscuous mode.  Does NOT drop the device reference — that
 * happens later in vport_netdev_free() or ovs_netdev_tunnel_destroy().
 * Must be called with RTNL held.
 */
void ovs_netdev_detach_dev(struct vport *vport)
{
	ASSERT_RTNL();
	vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
	netdev_rx_handler_unregister(vport->dev);
	netdev_upper_dev_unlink(vport->dev,
				netdev_master_upper_dev_get(vport->dev));
	dev_set_promiscuity(vport->dev, -1);
}
EXPORT_SYMBOL_GPL(ovs_netdev_detach_dev);
165EXPORT_SYMBOL_GPL(ovs_netdev_detach_dev);
166
/* vport_ops .destroy callback: detach the device (if still attached)
 * under RTNL, then free the vport after an RCU grace period so that
 * rx-handler callers which looked the vport up under RCU have drained.
 */
static void netdev_destroy(struct vport *vport)
{
	rtnl_lock();
	if (vport->dev->priv_flags & IFF_OVS_DATAPATH)
		ovs_netdev_detach_dev(vport);
	rtnl_unlock();

	call_rcu(&vport->rcu, vport_netdev_free);
}
176
/* Destroy helper for tunnel vports backed by a real net_device: detach
 * from the datapath, delete the underlying link, and free the vport
 * after an RCU grace period.
 */
void ovs_netdev_tunnel_destroy(struct vport *vport)
{
	rtnl_lock();
	if (vport->dev->priv_flags & IFF_OVS_DATAPATH)
		ovs_netdev_detach_dev(vport);

	/* We can be invoked both on explicit vport deletion and when the
	 * underlying netdev is being unregistered; only delete the link
	 * if the device is not already on its way down.
	 */
	if (vport->dev->reg_state == NETREG_REGISTERED)
		rtnl_delete_link(vport->dev);
	/* Release the device reference now and clear vport->dev so the
	 * RCU callback (vport_netdev_free) does not drop it again.
	 */
	dev_put(vport->dev);
	vport->dev = NULL;
	rtnl_unlock();

	call_rcu(&vport->rcu, vport_netdev_free);
}
EXPORT_SYMBOL_GPL(ovs_netdev_tunnel_destroy);
195EXPORT_SYMBOL_GPL(ovs_netdev_tunnel_destroy);
196
197
198struct vport *ovs_netdev_get_vport(struct net_device *dev)
199{
200 if (likely(dev->priv_flags & IFF_OVS_DATAPATH))
201 return (struct vport *)
202 rcu_dereference_rtnl(dev->rx_handler_data);
203 else
204 return NULL;
205}
206
/* Ops table for vports that wrap ordinary network devices; transmit is
 * simply handing the skb to the device's qdisc via dev_queue_xmit().
 */
static struct vport_ops ovs_netdev_vport_ops = {
	.type = OVS_VPORT_TYPE_NETDEV,
	.create = netdev_create,
	.destroy = netdev_destroy,
	.send = dev_queue_xmit,
};
213
/* Register the netdev vport type with the vport framework at module
 * init; returns 0 or a negative errno from the registration.
 */
int __init ovs_netdev_init(void)
{
	return ovs_vport_ops_register(&ovs_netdev_vport_ops);
}
218
/* Unregister the netdev vport type at module exit. */
void ovs_netdev_exit(void)
{
	ovs_vport_ops_unregister(&ovs_netdev_vport_ops);
}
223