/* drivers/net/ifb.c:

	The purpose of this driver is to provide a device that allows
	for sharing of resources:

	1) qdiscs/policies that are per device as opposed to system wide.
	ifb allows for a device which can be redirected to thus providing
	an impression of sharing.

	2) Allows for queueing incoming traffic for shaping instead of
	dropping.

	The original concept is based on what is known as the IMQ
	driver initially written by Martin Devera, later rewritten
	by Patrick McHardy and then maintained by Andre Correa.

	You need the tc action mirror or redirect to feed this device
	packets.

	This program is free software; you can redistribute it and/or
	modify it under the terms of the GNU General Public License
	as published by the Free Software Foundation; either version
	2 of the License, or (at your option) any later version.

	Authors:	Jamal Hadi Salim (2005)

*/
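
/*
 * Example setup (illustrative only; "eth0" stands in for whatever
 * interface is being shaped): redirect eth0's ingress traffic into ifb0
 * so an ordinary egress qdisc attached to ifb0 can shape it.
 *
 *	modprobe ifb numifbs=1
 *	ip link set dev ifb0 up
 *	tc qdisc add dev eth0 handle ffff: ingress
 *	tc filter add dev eth0 parent ffff: protocol ip u32 match u32 0 0 \
 *		action mirred egress redirect dev ifb0
 */
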
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>

#define TX_Q_LIMIT	32
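
/* Per-device state: ifb_xmit() queues packets on rq and bumps the rx
 * counters (guarded by rsync); ri_tasklet() drains them via tq and bumps
 * the tx counters (guarded by tsync).
 */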
struct ifb_private {
	struct tasklet_struct	ifb_tasklet;
	int			tasklet_pending;

	struct u64_stats_sync	rsync;
	struct sk_buff_head	rq;
	u64			rx_packets;
	u64			rx_bytes;

	struct u64_stats_sync	tsync;
	struct sk_buff_head	tq;
	u64			tx_packets;
	u64			tx_bytes;
};

static int numifbs = 2;

static void ri_tasklet(unsigned long dev);
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev);
static int ifb_open(struct net_device *dev);
static int ifb_close(struct net_device *dev);

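/* Bottom half: when its tq backlog is empty it splices everything
 * ifb_xmit() queued on rq over to tq under the tx queue lock, then
 * replays each packet: back out through dev_queue_xmit() if tc grabbed
 * it at egress, or back up the stack through netif_receive_skb() if it
 * was grabbed at ingress.
 */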
static void ri_tasklet(unsigned long dev)
{
	struct net_device *_dev = (struct net_device *)dev;
	struct ifb_private *dp = netdev_priv(_dev);
	struct netdev_queue *txq;
	struct sk_buff *skb;

	txq = netdev_get_tx_queue(_dev, 0);
	if ((skb = skb_peek(&dp->tq)) == NULL) {
		if (__netif_tx_trylock(txq)) {
			skb_queue_splice_tail_init(&dp->rq, &dp->tq);
			__netif_tx_unlock(txq);
		} else {
			/* reschedule */
			goto resched;
		}
	}

	while ((skb = __skb_dequeue(&dp->tq)) != NULL) {
		u32 from = G_TC_FROM(skb->tc_verd);

		skb->tc_verd = 0;
		skb->tc_verd = SET_TC_NCLS(skb->tc_verd);

		u64_stats_update_begin(&dp->tsync);
		dp->tx_packets++;
		dp->tx_bytes += skb->len;
		u64_stats_update_end(&dp->tsync);

		/* Hand the packet back to the device tc redirected it from;
		 * skb_iif still holds that device's ifindex.
		 */
		rcu_read_lock();
		skb->dev = dev_get_by_index_rcu(dev_net(_dev), skb->skb_iif);
		if (!skb->dev) {
			rcu_read_unlock();
			dev_kfree_skb(skb);
			_dev->stats.tx_dropped++;
			if (skb_queue_len(&dp->tq) != 0)
				goto resched;
			break;
		}
		rcu_read_unlock();
		skb->skb_iif = _dev->ifindex;

		if (from & AT_EGRESS) {
			dev_queue_xmit(skb);
		} else if (from & AT_INGRESS) {
			skb_pull(skb, skb->dev->hard_header_len);
			netif_receive_skb(skb);
		} else
			BUG();
	}

	/* If more packets were queued on rq in the meantime, keep the
	 * tasklet running; otherwise clear tasklet_pending and wake the
	 * stopped tx queue.
	 */
	if (__netif_tx_trylock(txq)) {
		if ((skb = skb_peek(&dp->rq)) == NULL) {
			dp->tasklet_pending = 0;
			if (netif_queue_stopped(_dev))
				netif_wake_queue(_dev);
		} else {
			__netif_tx_unlock(txq);
			goto resched;
		}
		__netif_tx_unlock(txq);
	} else {
resched:
		dp->tasklet_pending = 1;
		tasklet_schedule(&dp->ifb_tasklet);
	}

}

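/* The fetch_begin/fetch_retry loops below make each 64-bit packet/byte
 * pair read consistently even on 32-bit SMP, where the writers in
 * ifb_xmit() and ri_tasklet() cannot update the counters atomically.
 */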
static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev,
					     struct rtnl_link_stats64 *stats)
{
	struct ifb_private *dp = netdev_priv(dev);
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_bh(&dp->rsync);
		stats->rx_packets = dp->rx_packets;
		stats->rx_bytes = dp->rx_bytes;
	} while (u64_stats_fetch_retry_bh(&dp->rsync, start));

	do {
		start = u64_stats_fetch_begin_bh(&dp->tsync);
		stats->tx_packets = dp->tx_packets;
		stats->tx_bytes = dp->tx_bytes;
	} while (u64_stats_fetch_retry_bh(&dp->tsync, start));

	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;

	return stats;
}

static const struct net_device_ops ifb_netdev_ops = {
	.ndo_open	= ifb_open,
	.ndo_stop	= ifb_close,
	.ndo_get_stats64 = ifb_stats64,
	.ndo_start_xmit	= ifb_xmit,
	.ndo_validate_addr = eth_validate_addr,
};

#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \
		      NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6 | \
		      NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | \
		      NETIF_F_HW_VLAN_STAG_TX)

static void ifb_setup(struct net_device *dev)
{
	/* Initialize the device structure. */
	dev->destructor = free_netdev;
	dev->netdev_ops = &ifb_netdev_ops;

	/* Fill in device structure with ethernet-generic values. */
	ether_setup(dev);
	dev->tx_queue_len = TX_Q_LIMIT;

	dev->features |= IFB_FEATURES;
	dev->vlan_features |= IFB_FEATURES;

	dev->flags |= IFF_NOARP;
	dev->flags &= ~IFF_MULTICAST;
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
	eth_hw_addr_random(dev);
}

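/* .ndo_start_xmit: every packet "transmitted" on an ifb device is counted
 * as received, then either dropped (if it did not come through a tc mirred
 * action, i.e. it carries no ingress/egress mark and no originating
 * ifindex) or queued on rq for ri_tasklet() to replay. The tx queue is
 * stopped once rq reaches tx_queue_len.
 */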
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ifb_private *dp = netdev_priv(dev);
	u32 from = G_TC_FROM(skb->tc_verd);

	u64_stats_update_begin(&dp->rsync);
	dp->rx_packets++;
	dp->rx_bytes += skb->len;
	u64_stats_update_end(&dp->rsync);

	if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->skb_iif) {
		dev_kfree_skb(skb);
		dev->stats.rx_dropped++;
		return NETDEV_TX_OK;
	}

	if (skb_queue_len(&dp->rq) >= dev->tx_queue_len)
		netif_stop_queue(dev);

	__skb_queue_tail(&dp->rq, skb);
	if (!dp->tasklet_pending) {
		dp->tasklet_pending = 1;
		tasklet_schedule(&dp->ifb_tasklet);
	}

	return NETDEV_TX_OK;
}

static int ifb_close(struct net_device *dev)
{
	struct ifb_private *dp = netdev_priv(dev);

	tasklet_kill(&dp->ifb_tasklet);
	netif_stop_queue(dev);
	__skb_queue_purge(&dp->rq);
	__skb_queue_purge(&dp->tq);
	return 0;
}

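/* ifb_open() initializes the tasklet and both queues before starting the
 * tx queue; ifb_close() above kills the tasklet first so nothing is left
 * running while the queues are purged.
 */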
static int ifb_open(struct net_device *dev)
{
	struct ifb_private *dp = netdev_priv(dev);

	tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev);
	__skb_queue_head_init(&dp->rq);
	__skb_queue_head_init(&dp->tq);
	netif_start_queue(dev);

	return 0;
}

static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static struct rtnl_link_ops ifb_link_ops __read_mostly = {
	.kind		= "ifb",
	.priv_size	= sizeof(struct ifb_private),
	.setup		= ifb_setup,
	.validate	= ifb_validate,
};

/* Number of ifb devices to be set up by this module. */
module_param(numifbs, int, 0);
MODULE_PARM_DESC(numifbs, "Number of ifb devices");

static int __init ifb_init_one(int index)
{
	struct net_device *dev_ifb;
	int err;

	dev_ifb = alloc_netdev(sizeof(struct ifb_private),
			       "ifb%d", ifb_setup);

	if (!dev_ifb)
		return -ENOMEM;

	dev_ifb->rtnl_link_ops = &ifb_link_ops;
	err = register_netdevice(dev_ifb);
	if (err < 0)
		goto err;

	return 0;

err:
	free_netdev(dev_ifb);
	return err;
}

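/* The initial devices are created with the RTNL lock already held: the
 * module init path uses __rtnl_link_register() and register_netdevice()
 * (rather than the locking variants), so link-ops registration and the
 * numifbs device creations all happen under one rtnl_lock/rtnl_unlock
 * pair.
 */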
static int __init ifb_init_module(void)
{
	int i, err;

	rtnl_lock();
	err = __rtnl_link_register(&ifb_link_ops);
	if (err < 0)
		goto out;

	for (i = 0; i < numifbs && !err; i++) {
		err = ifb_init_one(i);
		cond_resched();
	}
	if (err)
		__rtnl_link_unregister(&ifb_link_ops);

out:
	rtnl_unlock();

	return err;
}

static void __exit ifb_cleanup_module(void)
{
	rtnl_link_unregister(&ifb_link_ops);
}

module_init(ifb_init_module);
module_exit(ifb_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamal Hadi Salim");
MODULE_ALIAS_RTNL_LINK("ifb");