/*
 *  TUN - Universal TUN/TAP device driver.
 *
 *  Copyright (C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"tun"
#define DRV_VERSION	"1.6"
#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <net/ip_tunnels.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/skb_array.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mutex.h>
#include <linux/ieee802154.h>
#include <linux/if_ltalk.h>
#include <uapi/linux/if_fddi.h>
#include <uapi/linux/if_hippi.h>
#include <uapi/linux/if_fc.h>
#include <net/ax25.h>
#include <net/rose.h>
#include <net/6lowpan.h>

#include <linux/uaccess.h>
#include <linux/proc_fs.h>

static void tun_default_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd);

#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* TUN device flags */

/* IFF_ATTACH_QUEUE is never stored in device flags,
 * overload it to mean fasync when stored there.
 */
#define TUN_FASYNC	IFF_ATTACH_QUEUE
/* High bits in flags field are unused. */
#define TUN_VNET_LE	0x80000000
#define TUN_VNET_BE	0x40000000

#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)

#define GOODCOPY_LEN 128

#define FLT_EXACT_COUNT 8
struct tap_filter {
	unsigned int	count;		/* Number of addrs. Zero means disabled */
	u32		mask[2];	/* Mask of the hashed addrs */
	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
};
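
/* Userspace programs a tap_filter with the TUNSETTXFILTER ioctl: the first
 * FLT_EXACT_COUNT addresses are matched exactly, any remaining (multicast)
 * addresses fall back to the hash in mask[]. A minimal sketch of the
 * userspace side, assuming an already-open tap fd:
 *
 *	struct {
 *		struct tun_filter flt;
 *		unsigned char addrs[2][ETH_ALEN];
 *	} req = { .flt = { .count = 2 } };
 *	// fill req.addrs[0] and req.addrs[1], then:
 *	ioctl(fd, TUNSETTXFILTER, &req.flt);
 *
 * See update_filter() below for the kernel side.
 */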

/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
 * to max number of VCPUs in guest.
 */
#define MAX_TAP_QUEUES 256
#define MAX_TAP_FLOWS  4096

#define TUN_FLOW_EXPIRE (3 * HZ)

/* A tun_file represents one queue of a tun/tap device: it embeds the
 * socket/sock pair used to move packets between the character device and
 * the netdevice, and is attached to a tun_struct via an RCU-protected
 * pointer that may only be dereferenced under rcu_read_lock or RTNL.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct tun_struct __rcu *tun;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	union {
		u16 queue_index;
		unsigned int ifindex;
	};
	struct napi_struct napi;
	bool napi_enabled;
	bool napi_frags_enabled;
	struct mutex napi_mutex;	/* Protects access to the above napi */
	struct list_head next;
	struct tun_struct *detached;
	struct ptr_ring tx_ring;
	struct xdp_rxq_info xdp_rxq;
};

struct tun_page {
	struct page *page;
	int count;
};

struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	u32 rps_rxhash;
	int queue_index;
	unsigned long updated ____cacheline_aligned_in_smp;
};

#define TUN_NUM_FLOW_ENTRIES 1024
#define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)
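
/* tun_hashfn() below reduces a packet hash to a flow-table bucket with a
 * bitwise AND, which is only a uniform modulo because TUN_NUM_FLOW_ENTRIES
 * is a power of two: rxhash & (1024 - 1) == rxhash % 1024.
 */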

struct tun_prog {
	struct rcu_head rcu;
	struct bpf_prog *prog;
};

/* Since the socket was moved into tun_file, the socket filter, sndbuf and
 * vnet header size are kept here so they can be restored when a file is
 * re-attached to a persistent device.
 */
struct tun_struct {
	struct tun_file __rcu *tfiles[MAX_TAP_QUEUES];
	unsigned int numqueues;
	unsigned int flags;
	kuid_t owner;
	kgid_t group;

	struct net_device *dev;
	netdev_features_t set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			   NETIF_F_TSO6)

	int align;
	int vnet_hdr_sz;
	int sndbuf;
	struct tap_filter txflt;
	struct sock_fprog fprog;
	/* protected by rtnl lock */
	bool filter_attached;
	u32 msg_enable;
	spinlock_t lock;
	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list flow_gc_timer;
	unsigned long ageing_time;
	unsigned int numdisabled;
	struct list_head disabled;
	void *security;
	u32 flow_count;
	u32 rx_batched;
	atomic_long_t rx_frame_errors;
	struct bpf_prog __rcu *xdp_prog;
	struct tun_prog __rcu *steering_prog;
	struct tun_prog __rcu *filter_prog;
	struct ethtool_link_ksettings link_ksettings;
	/* init args */
	struct file *file;
	struct ifreq *ifr;
};

struct veth {
	__be16 h_vlan_proto;
	__be16 h_vlan_TCI;
};

static void tun_flow_init(struct tun_struct *tun);
static void tun_flow_uninit(struct tun_struct *tun);

static int tun_napi_receive(struct napi_struct *napi, int budget)
{
	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	struct sk_buff *skb;
	int received = 0;

	__skb_queue_head_init(&process_queue);

	spin_lock(&queue->lock);
	skb_queue_splice_tail_init(queue, &process_queue);
	spin_unlock(&queue->lock);

	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
		napi_gro_receive(napi, skb);
		++received;
	}

	if (!skb_queue_empty(&process_queue)) {
		spin_lock(&queue->lock);
		skb_queue_splice(&process_queue, queue);
		spin_unlock(&queue->lock);
	}

	return received;
}

static int tun_napi_poll(struct napi_struct *napi, int budget)
{
	unsigned int received;

	received = tun_napi_receive(napi, budget);

	if (received < budget)
		napi_complete_done(napi, received);

	return received;
}

static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
			  bool napi_en, bool napi_frags)
{
	tfile->napi_enabled = napi_en;
	tfile->napi_frags_enabled = napi_en && napi_frags;
	if (napi_en) {
		netif_tx_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
				  NAPI_POLL_WEIGHT);
		napi_enable(&tfile->napi);
	}
}

static void tun_napi_disable(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_disable(&tfile->napi);
}

static void tun_napi_del(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		netif_napi_del(&tfile->napi);
}

static bool tun_napi_frags_enabled(const struct tun_file *tfile)
{
	return tfile->napi_frags_enabled;
}

#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be = !!(tun->flags & TUN_VNET_BE);

	if (put_user(be, argp))
		return -EFAULT;

	return 0;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be;

	if (get_user(be, argp))
		return -EFAULT;

	if (be)
		tun->flags |= TUN_VNET_BE;
	else
		tun->flags &= ~TUN_VNET_BE;

	return 0;
}
#else
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tun_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_LE ||
		tun_legacy_is_little_endian(tun);
}

static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
}

static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
}
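
/* The multi-byte fields of the virtio_net_hdr exchanged with userspace are
 * __virtio16. The helpers above convert them using little-endian byte order
 * when TUN_VNET_LE is set (or when legacy virtio is little-endian on this
 * architecture), and native byte order otherwise, e.g.:
 *
 *	u16 hdr_len = tun16_to_cpu(tun, gso.hdr_len);
 */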

static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & TUN_MASK_FLOW_ENTRIES;
}

static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *e;

	hlist_for_each_entry_rcu(e, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}
	return NULL;
}

static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		netif_info(tun, tx_queued, tun->dev,
			   "create flow: hash %u index %u\n",
			   rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->rps_rxhash = 0;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}
	return e;
}

static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	netif_info(tun, tx_queued, tun->dev, "delete flow: hash %u index %u\n",
		   e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}

static void tun_flow_flush(struct tun_struct *tun)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_cleanup(struct timer_list *t)
{
	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	spin_lock(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;

			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies)) {
				tun_flow_delete(tun, e);
				continue;
			}
			count++;
			if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock(&tun->lock);
}

static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep hashing and do not round down
		 * queue_index if we can.
		 */
		if (READ_ONCE(e->queue_index) != queue_index)
			WRITE_ONCE(e->queue_index, queue_index);
		if (e->updated != jiffies)
			e->updated = jiffies;
		sock_rps_record_flow_hash(e->rps_rxhash);
	} else {
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

	rcu_read_unlock();
}

/* Save the hash received in the stack receive path and update the
 * flow_hash table accordingly.
 */
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
	if (unlikely(e->rps_rxhash != hash))
		e->rps_rxhash = hash;
}

/* We try to identify a flow through its rxhash. The reason that
 * we do not check rxq no. is because some cards (e.g. the 82599) choose
 * the rxq based on the txq where the last packet of the flow came from.
 * As the userspace application moves between processors, we may get a
 * different rxq no. here.
 */
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_flow_entry *e;
	u32 txq = 0;
	u32 numqueues = 0;

	numqueues = READ_ONCE(tun->numqueues);

	txq = __skb_get_hash_symmetric(skb);
	e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
	if (e) {
		tun_flow_save_rps_rxhash(e, txq);
		txq = e->queue_index;
	} else {
		/* use multiply and shift instead of expensive divide */
		txq = ((u64)txq * numqueues) >> 32;
	}

	return txq;
}
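
/* The fallback above maps the 32-bit hash onto [0, numqueues) without a
 * division: ((u64)hash * numqueues) >> 32 scales the hash by
 * numqueues / 2^32, which is uniform for a uniform hash.
 */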

static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_prog *prog;
	u32 numqueues;
	u16 ret = 0;

	numqueues = READ_ONCE(tun->numqueues);
	if (!numqueues)
		return 0;

	prog = rcu_dereference(tun->steering_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog->prog, skb);

	return ret % numqueues;
}

static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	u16 ret;

	rcu_read_lock();
	if (rcu_dereference(tun->steering_prog))
		ret = tun_ebpf_select_queue(tun, skb);
	else
		ret = tun_automq_select_queue(tun, skb);
	rcu_read_unlock();

	return ret;
}

static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		(gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}

static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}

static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}

static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}

void tun_ptr_free(void *ptr)
{
	if (!ptr)
		return;
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		xdp_return_frame(xdpf);
	} else {
		__skb_array_destroy_skb(ptr);
	}
}
EXPORT_SYMBOL_GPL(tun_ptr_free);
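
/* The tx_ring carries two kinds of pointers: plain sk_buffs and xdp_frames
 * tagged in their low bit (see tun_xdp_to_ptr()/tun_is_xdp_frame() in
 * <linux/if_tun.h>), which is why every consumer must free entries through
 * tun_ptr_free() rather than kfree_skb() alone.
 */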

static void tun_queue_purge(struct tun_file *tfile)
{
	void *ptr;

	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
		tun_ptr_free(ptr);

	skb_queue_purge(&tfile->sk.sk_write_queue);
	skb_queue_purge(&tfile->sk.sk_error_queue);
}

static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;

	tun = rtnl_dereference(tfile->tun);

	if (tun && clean) {
		tun_napi_disable(tfile);
		tun_napi_del(tfile);
	}

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;
		BUG_ON(index >= tun->numqueues);

		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;
		rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
				   NULL);

		--tun->numqueues;
		if (clean) {
			RCU_INIT_POINTER(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else
			tun_disable_queue(tun, tfile);

		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		tun_queue_purge(tfile);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			if (!(tun->flags & IFF_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}
		if (tun)
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
		sock_put(&tfile->sk);
	}
}

static void tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_struct *tun;
	struct net_device *dev;

	rtnl_lock();
	tun = rtnl_dereference(tfile->tun);
	dev = tun ? tun->dev : NULL;
	__tun_detach(tfile, clean);
	if (dev)
		netdev_state_change(dev);
	rtnl_unlock();
}

static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		tun_napi_disable(tfile);
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tun_napi_del(tfile);
		/* Drop read queue */
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_enable_queue(tfile);
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	BUG_ON(tun->numdisabled != 0);

	if (tun->flags & IFF_PERSIST)
		module_put(THIS_MODULE);
}

static int tun_attach(struct tun_struct *tun, struct file *file,
		      bool skip_filter, bool napi, bool napi_frags,
		      bool publish_tun)
{
	struct tun_file *tfile = file->private_data;
	struct net_device *dev = tun->dev;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to persist device */
	if (!skip_filter && (tun->filter_attached == true)) {
		lock_sock(tfile->socket.sk);
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (err)
			goto out;
	}

	if (!tfile->detached &&
	    ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
			    GFP_KERNEL, tun_ptr_free)) {
		err = -ENOMEM;
		goto out;
	}

	tfile->queue_index = tun->numqueues;
	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;

	if (tfile->detached) {
		/* Re-attach detached tfile, updating XDP queue_index */
		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));

		if (tfile->xdp_rxq.queue_index != tfile->queue_index)
			tfile->xdp_rxq.queue_index = tfile->queue_index;
	} else {
		/* Setup XDP RX-queue info, for new tfile getting attached */
		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
				       tun->dev, tfile->queue_index, 0);
		if (err < 0)
			goto out;
		err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);
		if (err < 0) {
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
			goto out;
		}
		err = 0;
	}

	if (tfile->detached) {
		tun_enable_queue(tfile);
	} else {
		sock_hold(&tfile->sk);
		tun_napi_init(tun, tfile, napi, napi_frags);
	}

	if (rtnl_dereference(tun->xdp_prog))
		sock_set_flag(&tfile->sk, SOCK_XDP);

	/* device is allowed to go away first, so no need to hold extra
	 * refcnt.
	 */

	/* Publish tfile->tun and tun->tfiles only after we've fully
	 * initialized tfile; otherwise we risk using half-initialized
	 * object.
	 */
	if (publish_tun)
		rcu_assign_pointer(tfile->tun, tun);
	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
	tun->numqueues++;
	tun_set_real_num_queues(tun);
out:
	return err;
}

static struct tun_struct *tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);
	rcu_read_unlock();

	return tun;
}

static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}

/* TAP filtering */
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;

	mask[n >> 5] |= (1 << (n & 31));
}

static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;

	return mask[n >> 5] & (1 << (n & 31));
}

static int update_filter(struct tap_filter *filter, void __user *arg)
{
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = memdup_user(arg + sizeof(uf), alen);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* The filter is updated without holding any locks. Which is
	 * perfectly safe. We disable it first and in the worst case
	 * we'll accept a few packets that are not intended to be ours
	 * and we'd rather not disable the filter completely.
	 */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed,
	 * unicast will leave the filter disabled.
	 */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto free_addr;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above.
	 */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;
free_addr:
	kfree(addr);
	return err;
}
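
/* update_filter() returns the number of exact-match entries installed, 0
 * when the filter is left disabled (count == 0, or a unicast address
 * appeared past the exact slots), or a negative errno on copy failures.
 */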

/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
	 * at this point.
	 */
	struct ethhdr *eh = (struct ethhdr *) skb->data;
	int i;

	/* Exact match */
	for (i = 0; i < filter->count; i++)
		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
			return 1;

	/* Inexact match (multicast only) */
	if (is_multicast_ether_addr(eh->h_dest))
		return addr_hash_test(filter->mask, eh->h_dest);

	return 0;
}

/*
 * Checks whether the packet is accepted or not.
 * Returns: 0 - drop, !=0 - accept
 */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	if (!filter->count)
		return 1;

	return run_filter(filter, skb);
}

/* Network device part of the driver */

static const struct ethtool_ops tun_ethtool_ops;

static int tun_net_init(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct ifreq *ifr = tun->ifr;
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	spin_lock_init(&tun->lock);

	err = security_tun_dev_alloc_security(&tun->security);
	if (err < 0) {
		free_percpu(dev->tstats);
		return err;
	}

	tun_flow_init(tun);

	dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
			   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_STAG_TX;
	dev->features = dev->hw_features | NETIF_F_LLTX;
	dev->vlan_features = dev->features &
			     ~(NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_STAG_TX);

	tun->flags = (tun->flags & ~TUN_FEATURES) |
		     (ifr->ifr_flags & TUN_FEATURES);

	INIT_LIST_HEAD(&tun->disabled);
	err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI,
			 ifr->ifr_flags & IFF_NAPI_FRAGS, false);
	if (err < 0) {
		tun_flow_uninit(tun);
		security_tun_dev_free_security(tun->security);
		free_percpu(dev->tstats);
		return err;
	}
	return 0;
}

/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
	tun_detach_all(dev);
}

/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
	netif_tx_start_all_queues(dev);

	return 0;
}

/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}

/* Net device start xmit */
static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
		/* Select queue was not called for the skbuff, so we extract
		 * the RPS hash and save it into the flow_table here.
		 */
		struct tun_flow_entry *e;
		__u32 rxhash;

		rxhash = __skb_get_hash_symmetric(skb);
		e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
		if (e)
			tun_flow_save_rps_rxhash(e, rxhash);
	}
#endif
}

static unsigned int run_ebpf_filter(struct tun_struct *tun,
				    struct sk_buff *skb,
				    int len)
{
	struct tun_prog *prog = rcu_dereference(tun->filter_prog);

	if (prog)
		len = bpf_prog_run_clear_cb(prog->prog, skb);

	return len;
}

/* Net device start xmit */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int txq = skb->queue_mapping;
	struct netdev_queue *queue;
	struct tun_file *tfile;
	int len = skb->len;

	rcu_read_lock();
	tfile = rcu_dereference(tun->tfiles[txq]);

	/* Drop packet if interface is not attached */
	if (!tfile)
		goto drop;

	if (!rcu_dereference(tun->steering_prog))
		tun_automq_xmit(tun, skb);

	netif_info(tun, tx_queued, tun->dev, "%s %d\n", __func__, skb->len);

	/* Drop if the filter does not like it.
	 * This is a noop if the filter is disabled.
	 * Filter can be enabled only for the TAP devices.
	 */
	if (!check_filter(&tun->txflt, skb))
		goto drop;

	if (tfile->socket.sk->sk_filter &&
	    sk_filter(tfile->socket.sk, skb))
		goto drop;

	len = run_ebpf_filter(tun, skb, len);
	if (len == 0 || pskb_trim(skb, len))
		goto drop;

	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		goto drop;

	skb_tx_timestamp(skb);

	/* Orphan the skb - required as we might hang on to it
	 * for indefinite time.
	 */
	skb_orphan(skb);

	nf_reset_ct(skb);

	if (ptr_ring_produce(&tfile->tx_ring, skb))
		goto drop;

	/* NETIF_F_LLTX requires to do our own update of trans_start */
	queue = netdev_get_tx_queue(dev, txq);
	queue->trans_start = jiffies;

	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);

	rcu_read_unlock();
	return NETDEV_TX_OK;

drop:
	atomic_long_inc(&dev->tx_dropped);
	skb_tx_error(skb);
	kfree_skb(skb);
	rcu_read_unlock();
	return NET_XMIT_DROP;
}

static void tun_net_mclist(struct net_device *dev)
{
	/*
	 * This callback is supposed to deal with mc filter in
	 * _rx_ path and has nothing to do with the _tx_ path.
	 * In rx path we always accept everything userspace gives us.
	 */
}

static netdev_features_t tun_net_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	struct tun_struct *tun = netdev_priv(dev);

	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
}

static void tun_set_headroom(struct net_device *dev, int new_hr)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (new_hr < NET_SKB_PAD)
		new_hr = NET_SKB_PAD;

	tun->align = new_hr;
}

static void
tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct tun_struct *tun = netdev_priv(dev);

	dev_get_tstats64(dev, stats);

	stats->rx_frame_errors +=
		(unsigned long)atomic_long_read(&tun->rx_frame_errors);
}

static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
		       struct netlink_ext_ack *extack)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile;
	struct bpf_prog *old_prog;
	int i;

	old_prog = rtnl_dereference(tun->xdp_prog);
	rcu_assign_pointer(tun->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		if (prog)
			sock_set_flag(&tfile->sk, SOCK_XDP);
		else
			sock_reset_flag(&tfile->sk, SOCK_XDP);
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		if (prog)
			sock_set_flag(&tfile->sk, SOCK_XDP);
		else
			sock_reset_flag(&tfile->sk, SOCK_XDP);
	}

	return 0;
}

static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return tun_xdp_set(dev, xdp->prog, xdp->extack);
	default:
		return -EINVAL;
	}
}

static int tun_net_change_carrier(struct net_device *dev, bool new_carrier)
{
	if (new_carrier) {
		struct tun_struct *tun = netdev_priv(dev);

		if (!tun->numqueues)
			return -EPERM;

		netif_carrier_on(dev);
	} else {
		netif_carrier_off(dev);
	}
	return 0;
}

static const struct net_device_ops tun_netdev_ops = {
	.ndo_init		= tun_net_init,
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_select_queue	= tun_select_queue,
	.ndo_set_rx_headroom	= tun_set_headroom,
	.ndo_get_stats64	= tun_net_get_stats64,
	.ndo_change_carrier	= tun_net_change_carrier,
};

static void __tun_xdp_flush_tfile(struct tun_file *tfile)
{
	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
}

static int tun_xdp_xmit(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile;
	u32 numqueues;
	int nxmit = 0;
	int i;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	rcu_read_lock();

resample:
	numqueues = READ_ONCE(tun->numqueues);
	if (!numqueues) {
		rcu_read_unlock();
		return -ENXIO; /* Caller will free/return all frames */
	}

	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
					    numqueues]);
	if (unlikely(!tfile))
		goto resample;

	spin_lock(&tfile->tx_ring.producer_lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdp = frames[i];
		/* Encode the XDP flag into lowest bit for consumer to differ
		 * XDP buffer from sk_buff.
		 */
		void *frame = tun_xdp_to_ptr(xdp);

		if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
			atomic_long_inc(&dev->tx_dropped);
			break;
		}
		nxmit++;
	}
	spin_unlock(&tfile->tx_ring.producer_lock);

	if (flags & XDP_XMIT_FLUSH)
		__tun_xdp_flush_tfile(tfile);

	rcu_read_unlock();
	return nxmit;
}

static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
{
	struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);
	int nxmit;

	if (unlikely(!frame))
		return -EOVERFLOW;

	nxmit = tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
	if (!nxmit)
		xdp_return_frame_rx_napi(frame);
	return nxmit;
}

static const struct net_device_ops tap_netdev_ops = {
	.ndo_init		= tun_net_init,
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_set_rx_mode	= tun_net_mclist,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_select_queue	= tun_select_queue,
	.ndo_features_check	= passthru_features_check,
	.ndo_set_rx_headroom	= tun_set_headroom,
	.ndo_get_stats64	= dev_get_tstats64,
	.ndo_bpf		= tun_xdp,
	.ndo_xdp_xmit		= tun_xdp_xmit,
	.ndo_change_carrier	= tun_net_change_carrier,
};

static void tun_flow_init(struct tun_struct *tun)
{
	int i;

	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
		INIT_HLIST_HEAD(&tun->flows[i]);

	tun->ageing_time = TUN_FLOW_EXPIRE;
	timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
	mod_timer(&tun->flow_gc_timer,
		  round_jiffies_up(jiffies + tun->ageing_time));
}

static void tun_flow_uninit(struct tun_struct *tun)
{
	del_timer_sync(&tun->flow_gc_timer);
	tun_flow_flush(tun);
}

#define MIN_MTU 68
#define MAX_MTU 65535

/* Initialize net device. */
static void tun_net_initialize(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		dev->netdev_ops = &tun_netdev_ops;
		dev->header_ops = &ip_tunnel_header_ops;

		/* Point-to-Point TUN Device */
		dev->hard_header_len = 0;
		dev->addr_len = 0;
		dev->mtu = 1500;

		/* Zero header length */
		dev->type = ARPHRD_NONE;
		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
		break;

	case IFF_TAP:
		dev->netdev_ops = &tap_netdev_ops;
		/* Ethernet TAP Device */
		ether_setup(dev);
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

		eth_hw_addr_random(dev);

		break;
	}

	dev->min_mtu = MIN_MTU;
	dev->max_mtu = MAX_MTU - dev->hard_header_len;
}

static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
{
	struct sock *sk = tfile->socket.sk;

	return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
}

/* Character device part */

/* Poll */
static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = tun_get(tfile);
	struct sock *sk;
	__poll_t mask = 0;

	if (!tun)
		return EPOLLERR;

	sk = tfile->socket.sk;

	poll_wait(file, sk_sleep(sk), wait);

	if (!ptr_ring_empty(&tfile->tx_ring))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to
	 * guarantee EPOLLOUT to be raised by either here or
	 * tun_sock_write_space(). Then process could get notification
	 * after it writes to a down device and meets -EIO.
	 */
	if (tun_sock_writeable(tun, tfile) ||
	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
	     tun_sock_writeable(tun, tfile)))
		mask |= EPOLLOUT | EPOLLWRNORM;

	if (tun->dev->reg_state != NETREG_REGISTERED)
		mask = EPOLLERR;

	tun_put(tun);
	return mask;
}

static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
					    size_t len,
					    const struct iov_iter *it)
{
	struct sk_buff *skb;
	size_t linear;
	int err;
	int i;

	if (it->nr_segs > MAX_SKB_FRAGS + 1)
		return ERR_PTR(-EMSGSIZE);

	local_bh_disable();
	skb = napi_get_frags(&tfile->napi);
	local_bh_enable();
	if (!skb)
		return ERR_PTR(-ENOMEM);

	linear = iov_iter_single_seg_count(it);
	err = __skb_grow(skb, linear);
	if (err)
		goto free;

	skb->len = len;
	skb->data_len = len - linear;
	skb->truesize += skb->data_len;

	for (i = 1; i < it->nr_segs; i++) {
		size_t fragsz = it->iov[i].iov_len;
		struct page *page;
		void *frag;

		if (fragsz == 0 || fragsz > PAGE_SIZE) {
			err = -EINVAL;
			goto free;
		}
		frag = netdev_alloc_frag(fragsz);
		if (!frag) {
			err = -ENOMEM;
			goto free;
		}
		page = virt_to_head_page(frag);
		skb_fill_page_desc(skb, i - 1, page,
				   frag - page_address(page), fragsz);
	}

	return skb;
free:
	/* frees skb and all frags allocated with napi_alloc_frag() */
	napi_free_frags(&tfile->napi);
	return ERR_PTR(err);
}

/* prepad is the amount to reserve at front.  len is length after that.
 * linear is a hint as to how much to copy (usually headers).
 */
static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
				     size_t prepad, size_t len,
				     size_t linear, int noblock)
{
	struct sock *sk = tfile->socket.sk;
	struct sk_buff *skb;
	int err;

	/* Under a page?  Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   &err, 0);
	if (!skb)
		return ERR_PTR(err);

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
			   struct sk_buff *skb, int more)
{
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	u32 rx_batched = tun->rx_batched;
	bool rcv = false;

	if (!rx_batched || (!more && skb_queue_empty(queue))) {
		local_bh_disable();
		skb_record_rx_queue(skb, tfile->queue_index);
		netif_receive_skb(skb);
		local_bh_enable();
		return;
	}

	spin_lock(&queue->lock);
	if (!more || skb_queue_len(queue) == rx_batched) {
		__skb_queue_head_init(&process_queue);
		skb_queue_splice_tail_init(queue, &process_queue);
		rcv = true;
	} else {
		__skb_queue_tail(queue, skb);
	}
	spin_unlock(&queue->lock);

	if (rcv) {
		struct sk_buff *nskb;

		local_bh_disable();
		while ((nskb = __skb_dequeue(&process_queue))) {
			skb_record_rx_queue(nskb, tfile->queue_index);
			netif_receive_skb(nskb);
		}
		skb_record_rx_queue(skb, tfile->queue_index);
		netif_receive_skb(skb);
		local_bh_enable();
	}
}

static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
			      int len, int noblock, bool zerocopy)
{
	if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
		return false;

	if (tfile->socket.sk->sk_sndbuf != INT_MAX)
		return false;

	if (!noblock)
		return false;

	if (zerocopy)
		return false;

	if (SKB_DATA_ALIGN(len + TUN_RX_PAD) +
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
		return false;

	return true;
}
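
/* The five checks above gate the copy-once fast path in tun_build_skb():
 * it is only safe for TAP frames, with the default sndbuf (no send-buffer
 * accounting), a non-blocking writer, no zero-copy pages from vhost, and
 * a frame small enough to share one page with its skb_shared_info.
 */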

static struct sk_buff *__tun_build_skb(struct tun_file *tfile,
				       struct page_frag *alloc_frag, char *buf,
				       int buflen, int len, int pad)
{
	struct sk_buff *skb = build_skb(buf, buflen);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	skb_reserve(skb, pad);
	skb_put(skb, len);
	skb_set_owner_w(skb, tfile->socket.sk);

	get_page(alloc_frag->page);
	alloc_frag->offset += buflen;

	return skb;
}

static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
		       struct xdp_buff *xdp, u32 act)
{
	int err;

	switch (act) {
	case XDP_REDIRECT:
		err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
		if (err)
			return err;
		break;
	case XDP_TX:
		err = tun_xdp_tx(tun->dev, xdp);
		if (err < 0)
			return err;
		break;
	case XDP_PASS:
		break;
	default:
		bpf_warn_invalid_xdp_action(tun->dev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(tun->dev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		atomic_long_inc(&tun->dev->rx_dropped);
		break;
	}

	return act;
}

static struct sk_buff *tun_build_skb(struct tun_struct *tun,
				     struct tun_file *tfile,
				     struct iov_iter *from,
				     struct virtio_net_hdr *hdr,
				     int len, int *skb_xdp)
{
	struct page_frag *alloc_frag = &current->task_frag;
	struct bpf_prog *xdp_prog;
	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	char *buf;
	size_t copied;
	int pad = TUN_RX_PAD;
	int err = 0;

	rcu_read_lock();
	xdp_prog = rcu_dereference(tun->xdp_prog);
	if (xdp_prog)
		pad += XDP_PACKET_HEADROOM;
	buflen += SKB_DATA_ALIGN(len + pad);
	rcu_read_unlock();

	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
	if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
		return ERR_PTR(-ENOMEM);

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	copied = copy_page_from_iter(alloc_frag->page,
				     alloc_frag->offset + pad,
				     len, from);
	if (copied != len)
		return ERR_PTR(-EFAULT);

	/* There's a small window that XDP may be set after the check
	 * above. For GSO packets, or when no program is loaded, skip XDP
	 * here and let the generic XDP hook deal with the skb later.
	 */
	if (hdr->gso_type || !xdp_prog) {
		*skb_xdp = 1;
		return __tun_build_skb(tfile, alloc_frag, buf, buflen, len,
				       pad);
	}

	*skb_xdp = 0;

	local_bh_disable();
	rcu_read_lock();
	xdp_prog = rcu_dereference(tun->xdp_prog);
	if (xdp_prog) {
		struct xdp_buff xdp;
		u32 act;

		xdp_init_buff(&xdp, buflen, &tfile->xdp_rxq);
		xdp_prepare_buff(&xdp, buf, pad, len, false);

		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		if (act == XDP_REDIRECT || act == XDP_TX) {
			get_page(alloc_frag->page);
			alloc_frag->offset += buflen;
		}
		err = tun_xdp_act(tun, xdp_prog, &xdp, act);
		if (err < 0) {
			if (act == XDP_REDIRECT || act == XDP_TX)
				put_page(alloc_frag->page);
			goto out;
		}

		if (err == XDP_REDIRECT)
			xdp_do_flush();
		if (err != XDP_PASS)
			goto out;

		pad = xdp.data - xdp.data_hard_start;
		len = xdp.data_end - xdp.data;
	}
	rcu_read_unlock();
	local_bh_enable();

	return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);

out:
	rcu_read_unlock();
	local_bh_enable();
	return NULL;
}

/* Get packet from user space buffer */
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
			    void *msg_control, struct iov_iter *from,
			    int noblock, bool more)
{
	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
	struct sk_buff *skb;
	size_t total_len = iov_iter_count(from);
	size_t len = total_len, align = tun->align, linear;
	struct virtio_net_hdr gso = { 0 };
	int good_linear;
	int copylen;
	bool zerocopy = false;
	int err;
	u32 rxhash = 0;
	int skb_xdp = 1;
	bool frags = tun_napi_frags_enabled(tfile);

	if (!(tun->flags & IFF_NO_PI)) {
		if (len < sizeof(pi))
			return -EINVAL;
		len -= sizeof(pi);

		if (!copy_from_iter_full(&pi, sizeof(pi), from))
			return -EFAULT;
	}

	if (tun->flags & IFF_VNET_HDR) {
		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);

		if (len < vnet_hdr_sz)
			return -EINVAL;
		len -= vnet_hdr_sz;

		if (!copy_from_iter_full(&gso, sizeof(gso), from))
			return -EFAULT;

		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
			gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);

		if (tun16_to_cpu(tun, gso.hdr_len) > len)
			return -EINVAL;
		iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
	}

	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
		align += NET_IP_ALIGN;
		if (unlikely(len < ETH_HLEN ||
			     (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
			return -EINVAL;
	}

	good_linear = SKB_MAX_HEAD(align);

	if (msg_control) {
		struct iov_iter i = *from;

		/* There are 256 bytes to be copied in skb, so there is
		 * enough room for skb expand head in case it is used.
		 * The rest of the buffer is mapped from userspace.
		 */
		copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		linear = copylen;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
		/* For packets that are not easy to process here
		 * (e.g. GSO or jumbo packets), we do it after the
		 * skb was created with the generic XDP routine.
		 */
		skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
		if (IS_ERR(skb)) {
			atomic_long_inc(&tun->dev->rx_dropped);
			return PTR_ERR(skb);
		}
		if (!skb)
			return total_len;
	} else {
		if (!zerocopy) {
			copylen = len;
			if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
				linear = good_linear;
			else
				linear = tun16_to_cpu(tun, gso.hdr_len);
		}

		if (frags) {
			mutex_lock(&tfile->napi_mutex);
			skb = tun_napi_alloc_frags(tfile, copylen, from);
			/* tun_napi_alloc_frags() enforces a layout for the skb.
			 * If zerocopy is enabled, then this layout might be
			 * overwritten by zerocopy_sg_from_iter().
			 */
			zerocopy = false;
		} else {
			skb = tun_alloc_skb(tfile, align, copylen, linear,
					    noblock);
		}

		if (IS_ERR(skb)) {
			if (PTR_ERR(skb) != -EAGAIN)
				atomic_long_inc(&tun->dev->rx_dropped);
			if (frags)
				mutex_unlock(&tfile->napi_mutex);
			return PTR_ERR(skb);
		}

		if (zerocopy)
			err = zerocopy_sg_from_iter(skb, from);
		else
			err = skb_copy_datagram_from_iter(skb, 0, from, len);

		if (err) {
			err = -EFAULT;
drop:
			atomic_long_inc(&tun->dev->rx_dropped);
			kfree_skb(skb);
			if (frags) {
				tfile->napi.skb = NULL;
				mutex_unlock(&tfile->napi_mutex);
			}

			return err;
		}
	}

	if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
		atomic_long_inc(&tun->rx_frame_errors);
		kfree_skb(skb);
		if (frags) {
			tfile->napi.skb = NULL;
			mutex_unlock(&tfile->napi_mutex);
		}

		return -EINVAL;
	}

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		if (tun->flags & IFF_NO_PI) {
			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;

			switch (ip_version) {
			case 4:
				pi.proto = htons(ETH_P_IP);
				break;
			case 6:
				pi.proto = htons(ETH_P_IPV6);
				break;
			default:
				atomic_long_inc(&tun->dev->rx_dropped);
				kfree_skb(skb);
				return -EINVAL;
			}
		}

		skb_reset_mac_header(skb);
		skb->protocol = pi.proto;
		skb->dev = tun->dev;
		break;
	case IFF_TAP:
		if (frags && !pskb_may_pull(skb, ETH_HLEN)) {
			err = -ENOMEM;
			goto drop;
		}
		skb->protocol = eth_type_trans(skb, tun->dev);
		break;
	}

	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_zcopy_init(skb, msg_control);
	} else if (msg_control) {
		struct ubuf_info *uarg = msg_control;

		uarg->callback(NULL, uarg, false);
	}

	skb_reset_network_header(skb);
	skb_probe_transport_header(skb);
	skb_record_rx_queue(skb, tfile->queue_index);

	if (skb_xdp) {
		struct bpf_prog *xdp_prog;
		int ret;

		local_bh_disable();
		rcu_read_lock();
		xdp_prog = rcu_dereference(tun->xdp_prog);
		if (xdp_prog) {
			ret = do_xdp_generic(xdp_prog, skb);
			if (ret != XDP_PASS) {
				rcu_read_unlock();
				local_bh_enable();
				if (frags) {
					tfile->napi.skb = NULL;
					mutex_unlock(&tfile->napi_mutex);
				}
				return total_len;
			}
		}
		rcu_read_unlock();
		local_bh_enable();
	}

	/* Compute the costly rx hash only if needed for flow updates.
	 * We may get a very small possibility of OOO during switching, not
	 * worth to optimize.
	 */
	if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
	    !tfile->detached)
		rxhash = __skb_get_hash_symmetric(skb);

	rcu_read_lock();
	if (unlikely(!(tun->dev->flags & IFF_UP))) {
		err = -EIO;
		rcu_read_unlock();
		goto drop;
	}

	if (frags) {
		u32 headlen;

		/* Exercise flow dissector code path. */
		skb_push(skb, ETH_HLEN);
		headlen = eth_get_headlen(tun->dev, skb->data,
					  skb_headlen(skb));

		if (unlikely(headlen > skb_headlen(skb))) {
			atomic_long_inc(&tun->dev->rx_dropped);
			napi_free_frags(&tfile->napi);
			rcu_read_unlock();
			mutex_unlock(&tfile->napi_mutex);
			WARN_ON(1);
			return -ENOMEM;
		}

		local_bh_disable();
		napi_gro_frags(&tfile->napi);
		local_bh_enable();
		mutex_unlock(&tfile->napi_mutex);
	} else if (tfile->napi_enabled) {
		struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
		int queue_len;

		spin_lock_bh(&queue->lock);
		__skb_queue_tail(queue, skb);
		queue_len = skb_queue_len(queue);
		spin_unlock(&queue->lock);

		if (!more || queue_len > NAPI_POLL_WEIGHT)
			napi_schedule(&tfile->napi);

		local_bh_enable();
	} else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
		tun_rx_batched(tun, tfile, skb, more);
	} else {
		netif_rx_ni(skb);
	}
	rcu_read_unlock();

	preempt_disable();
	dev_sw_netstats_rx_add(tun->dev, len);
	preempt_enable();

	if (rxhash)
		tun_flow_update(tun, rxhash, tfile);

	return total_len;
}

static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = tun_get(tfile);
	ssize_t result;
	int noblock = 0;

	if (!tun)
		return -EBADFD;

	if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
		noblock = 1;

	result = tun_get_user(tun, tfile, NULL, from, noblock, false);

	tun_put(tun);
	return result;
}

static ssize_t tun_put_user_xdp(struct tun_struct *tun,
				struct tun_file *tfile,
				struct xdp_frame *xdp_frame,
				struct iov_iter *iter)
{
	int vnet_hdr_sz = 0;
	size_t size = xdp_frame->len;
	size_t ret;

	if (tun->flags & IFF_VNET_HDR) {
		struct virtio_net_hdr gso = { 0 };

		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
		if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
			return -EINVAL;
		if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
			     sizeof(gso)))
			return -EFAULT;
		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
	}

	ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;

	preempt_disable();
	dev_sw_netstats_tx_add(tun->dev, 1, ret);
	preempt_enable();

	return ret;
}

/* Put packet to the user space buffer */
static ssize_t tun_put_user(struct tun_struct *tun,
			    struct tun_file *tfile,
			    struct sk_buff *skb,
			    struct iov_iter *iter)
{
	struct tun_pi pi = { 0, skb->protocol };
	ssize_t total;
	int vlan_offset = 0;
	int vlan_hlen = 0;
	int vnet_hdr_sz = 0;

	if (skb_vlan_tag_present(skb))
		vlan_hlen = VLAN_HLEN;

	if (tun->flags & IFF_VNET_HDR)
		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);

	total = skb->len + vlan_hlen + vnet_hdr_sz;

	if (!(tun->flags & IFF_NO_PI)) {
		if (iov_iter_count(iter) < sizeof(pi))
			return -EINVAL;

		total += sizeof(pi);
		if (iov_iter_count(iter) < total) {
			/* Packet will be striped */
			pi.flags |= TUN_PKT_STRIP;
		}

		if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
			return -EFAULT;
	}

	if (vnet_hdr_sz) {
		struct virtio_net_hdr gso;

		if (iov_iter_count(iter) < vnet_hdr_sz)
			return -EINVAL;

		if (virtio_net_hdr_from_skb(skb, &gso,
					    tun_is_little_endian(tun), true,
					    vlan_hlen)) {
			struct skb_shared_info *sinfo = skb_shinfo(skb);

			pr_err("unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
			       sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
			       tun16_to_cpu(tun, gso.hdr_len));
			print_hex_dump(KERN_ERR, "tun: ",
				       DUMP_PREFIX_NONE,
				       16, 1, skb->head,
				       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
			WARN_ON_ONCE(1);
			return -EINVAL;
		}

		if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
			return -EFAULT;

		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
	}

	if (vlan_hlen) {
		int ret;
		struct veth veth;

		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);

		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
		if (ret || !iov_iter_count(iter))
			goto done;

		ret = copy_to_iter(&veth, sizeof(veth), iter);
		if (ret != sizeof(veth) || !iov_iter_count(iter))
			goto done;
	}

	skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);

done:
	/* caller is in process context, */
	preempt_disable();
	dev_sw_netstats_tx_add(tun->dev, 1, skb->len + vlan_hlen);
	preempt_enable();

	return total;
}
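
/* On the read side a frame handed to userspace is laid out as
 * [struct tun_pi (unless IFF_NO_PI)][vnet_hdr_sz bytes of virtio_net_hdr
 * (if IFF_VNET_HDR)][packet, with any VLAN tag re-inserted inline].
 * tun_put_user() returns that full length even when the iov was too short,
 * flagging truncation via TUN_PKT_STRIP in the tun_pi header.
 */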

static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
{
	DECLARE_WAITQUEUE(wait, current);
	void *ptr = NULL;
	int error = 0;

	ptr = ptr_ring_consume(&tfile->tx_ring);
	if (ptr)
		goto out;
	if (noblock) {
		error = -EAGAIN;
		goto out;
	}

	add_wait_queue(&tfile->socket.wq.wait, &wait);

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		ptr = ptr_ring_consume(&tfile->tx_ring);
		if (ptr)
			break;
		if (signal_pending(current)) {
			error = -ERESTARTSYS;
			break;
		}
		if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
			error = -EFAULT;
			break;
		}

		schedule();
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&tfile->socket.wq.wait, &wait);

out:
	*err = error;
	return ptr;
}

static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
			   struct iov_iter *to,
			   int noblock, void *ptr)
{
	ssize_t ret;
	int err;

	if (!iov_iter_count(to)) {
		tun_ptr_free(ptr);
		return 0;
	}

	if (!ptr) {
		/* Read frames from ring */
		ptr = tun_ring_recv(tfile, noblock, &err);
		if (!ptr)
			return err;
	}

	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		ret = tun_put_user_xdp(tun, tfile, xdpf, to);
		xdp_return_frame(xdpf);
	} else {
		struct sk_buff *skb = ptr;

		ret = tun_put_user(tun, tfile, skb, to);
		if (unlikely(ret < 0))
			kfree_skb(skb);
		else
			consume_skb(skb);
	}

	return ret;
}

static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = tun_get(tfile);
	ssize_t len = iov_iter_count(to), ret;
	int noblock = 0;

	if (!tun)
		return -EBADFD;

	if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
		noblock = 1;

	ret = tun_do_read(tun, tfile, to, noblock, NULL);
	ret = min_t(ssize_t, ret, len);
	if (ret > 0)
		iocb->ki_pos = ret;
	tun_put(tun);
	return ret;
}

static void tun_prog_free(struct rcu_head *rcu)
{
	struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu);

	bpf_prog_destroy(prog->prog);
	kfree(prog);
}

static int __tun_set_ebpf(struct tun_struct *tun,
			  struct tun_prog __rcu **prog_p,
			  struct bpf_prog *prog)
{
	struct tun_prog *old, *new = NULL;

	if (prog) {
		new = kmalloc(sizeof(*new), GFP_KERNEL);
		if (!new)
			return -ENOMEM;
		new->prog = prog;
	}

	spin_lock_bh(&tun->lock);
	old = rcu_dereference_protected(*prog_p,
					lockdep_is_held(&tun->lock));
	rcu_assign_pointer(*prog_p, new);
	spin_unlock_bh(&tun->lock);

	if (old)
		call_rcu(&old->rcu, tun_prog_free);

	return 0;
}

static void tun_free_netdev(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	BUG_ON(!(list_empty(&tun->disabled)));

	free_percpu(dev->tstats);
	tun_flow_uninit(tun);
	security_tun_dev_free_security(tun->security);
	__tun_set_ebpf(tun, &tun->steering_prog, NULL);
	__tun_set_ebpf(tun, &tun->filter_prog, NULL);
}

static void tun_setup(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	tun->owner = INVALID_UID;
	tun->group = INVALID_GID;
	tun_default_link_ksettings(dev, &tun->link_ksettings);

	dev->ethtool_ops = &tun_ethtool_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = tun_free_netdev;
	/* We prefer our own queue length */
	dev->tx_queue_len = TUN_READQ_SIZE;
}
2280
2281
2282
2283
2284static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
2285 struct netlink_ext_ack *extack)
2286{
2287 NL_SET_ERR_MSG(extack,
2288 "tun/tap creation via rtnetlink is not supported.");
2289 return -EOPNOTSUPP;
2290}
2291
2292static size_t tun_get_size(const struct net_device *dev)
2293{
2294 BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t));
2295 BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t));
2296
2297 return nla_total_size(sizeof(uid_t)) +
2298 nla_total_size(sizeof(gid_t)) +
2299 nla_total_size(sizeof(u8)) +
2300 nla_total_size(sizeof(u8)) +
2301 nla_total_size(sizeof(u8)) +
2302 nla_total_size(sizeof(u8)) +
2303 nla_total_size(sizeof(u8)) +
2304 nla_total_size(sizeof(u32)) +
2305 nla_total_size(sizeof(u32)) +
2306 0;
2307}
2308
2309static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev)
2310{
2311 struct tun_struct *tun = netdev_priv(dev);
2312
2313 if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
2314 goto nla_put_failure;
2315 if (uid_valid(tun->owner) &&
2316 nla_put_u32(skb, IFLA_TUN_OWNER,
2317 from_kuid_munged(current_user_ns(), tun->owner)))
2318 goto nla_put_failure;
2319 if (gid_valid(tun->group) &&
2320 nla_put_u32(skb, IFLA_TUN_GROUP,
2321 from_kgid_munged(current_user_ns(), tun->group)))
2322 goto nla_put_failure;
2323 if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
2324 goto nla_put_failure;
2325 if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
2326 goto nla_put_failure;
2327 if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
2328 goto nla_put_failure;
2329 if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE,
2330 !!(tun->flags & IFF_MULTI_QUEUE)))
2331 goto nla_put_failure;
2332 if (tun->flags & IFF_MULTI_QUEUE) {
2333 if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
2334 goto nla_put_failure;
2335 if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES,
2336 tun->numdisabled))
2337 goto nla_put_failure;
2338 }
2339
2340 return 0;
2341
2342nla_put_failure:
2343 return -EMSGSIZE;
2344}

static struct rtnl_link_ops tun_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct tun_struct),
	.setup		= tun_setup,
	.validate	= tun_validate,
	.get_size	= tun_get_size,
	.fill_info	= tun_fill_info,
};

static void tun_sock_write_space(struct sock *sk)
{
	struct tun_file *tfile;
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk))
		return;

	if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_sync_poll(wqueue, EPOLLOUT |
						EPOLLWRNORM | EPOLLWRBAND);

	tfile = container_of(sk, struct tun_file, sk);
	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
}

static void tun_put_page(struct tun_page *tpage)
{
	if (tpage->page)
		__page_frag_cache_drain(tpage->page, tpage->count);
}
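
/* tun_put_page() and struct tun_page implement a small batching scheme for
 * buffers dropped by the XDP program in tun_xdp_one() below: consecutive
 * dropped buffers that live in the same page are counted, and the whole
 * batch is released with a single __page_frag_cache_drain() instead of one
 * put_page() per packet.
 */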

static int tun_xdp_one(struct tun_struct *tun,
		       struct tun_file *tfile,
		       struct xdp_buff *xdp, int *flush,
		       struct tun_page *tpage)
{
	unsigned int datasize = xdp->data_end - xdp->data;
	struct tun_xdp_hdr *hdr = xdp->data_hard_start;
	struct virtio_net_hdr *gso = &hdr->gso;
	struct bpf_prog *xdp_prog;
	struct sk_buff *skb = NULL;
	u32 rxhash = 0, act;
	int buflen = hdr->buflen;
	int err = 0;
	bool skb_xdp = false;
	struct page *page;

	xdp_prog = rcu_dereference(tun->xdp_prog);
	if (xdp_prog) {
		if (gso->gso_type) {
			skb_xdp = true;
			goto build;
		}

		xdp_init_buff(xdp, buflen, &tfile->xdp_rxq);
		xdp_set_data_meta_invalid(xdp);

		act = bpf_prog_run_xdp(xdp_prog, xdp);
		err = tun_xdp_act(tun, xdp_prog, xdp, act);
		if (err < 0) {
			put_page(virt_to_head_page(xdp->data));
			return err;
		}

		switch (err) {
		case XDP_REDIRECT:
			*flush = true;
			fallthrough;
		case XDP_TX:
			return 0;
		case XDP_PASS:
			break;
		default:
			page = virt_to_head_page(xdp->data);
			if (tpage->page == page) {
				++tpage->count;
			} else {
				tun_put_page(tpage);
				tpage->page = page;
				tpage->count = 1;
			}
			return 0;
		}
	}

build:
	skb = build_skb(xdp->data_hard_start, buflen);
	if (!skb) {
		err = -ENOMEM;
		goto out;
	}

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	skb_put(skb, xdp->data_end - xdp->data);

	if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
		atomic_long_inc(&tun->rx_frame_errors);
		kfree_skb(skb);
		err = -EINVAL;
		goto out;
	}

	skb->protocol = eth_type_trans(skb, tun->dev);
	skb_reset_network_header(skb);
	skb_probe_transport_header(skb);
	skb_record_rx_queue(skb, tfile->queue_index);

	if (skb_xdp) {
		err = do_xdp_generic(xdp_prog, skb);
		if (err != XDP_PASS)
			goto out;
	}

	if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
	    !tfile->detached)
		rxhash = __skb_get_hash_symmetric(skb);

	netif_receive_skb(skb);

	/* No need to disable preemption here since this function is
	 * always called with bh disabled
	 */
	dev_sw_netstats_rx_add(tun->dev, datasize);

	if (rxhash)
		tun_flow_update(tun, rxhash, tfile);

out:
	return err;
}

static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	int ret, i;
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = tun_get(tfile);
	struct tun_msg_ctl *ctl = m->msg_control;
	struct xdp_buff *xdp;

	if (!tun)
		return -EBADFD;

	if (ctl && (ctl->type == TUN_MSG_PTR)) {
		struct tun_page tpage;
		int n = ctl->num;
		int flush = 0;

		memset(&tpage, 0, sizeof(tpage));

		local_bh_disable();
		rcu_read_lock();

		for (i = 0; i < n; i++) {
			xdp = &((struct xdp_buff *)ctl->ptr)[i];
			tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
		}

		if (flush)
			xdp_do_flush();

		rcu_read_unlock();
		local_bh_enable();

		tun_put_page(&tpage);

		ret = total_len;
		goto out;
	}

	ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter,
			   m->msg_flags & MSG_DONTWAIT,
			   m->msg_flags & MSG_MORE);
out:
	tun_put(tun);
	return ret;
}
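
/* Usage sketch: an in-kernel caller such as vhost-net can batch pre-built
 * XDP buffers through sendmsg() by pointing msg_control at a tun_msg_ctl
 * of type TUN_MSG_PTR. Roughly, assuming a caller-owned array of prepared
 * buffers and with error handling elided:
 *
 *	struct xdp_buff xdp[VHOST_NET_BATCH];
 *	struct tun_msg_ctl ctl = {
 *		.type = TUN_MSG_PTR,
 *		.num  = batch,		// buffers actually filled in
 *		.ptr  = xdp,
 *	};
 *	struct msghdr msg = { .msg_control = &ctl };
 *
 *	sock_sendmsg(sock, &msg);	// sock from tun_get_socket()
 *
 * Each buffer is then fed to tun_xdp_one() under a single RCU/BH section,
 * and the call simply returns total_len.
 */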

static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
		       int flags)
{
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = tun_get(tfile);
	void *ptr = m->msg_control;
	int ret;

	if (!tun) {
		ret = -EBADFD;
		goto out_free;
	}

	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
		ret = -EINVAL;
		goto out_put_tun;
	}
	if (flags & MSG_ERRQUEUE) {
		ret = sock_recv_errqueue(sock->sk, m, total_len,
					 SOL_PACKET, TUN_TX_TIMESTAMP);
		goto out;
	}
	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
	if (ret > (ssize_t)total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
out:
	tun_put(tun);
	return ret;

out_put_tun:
	tun_put(tun);
out_free:
	tun_ptr_free(ptr);
	return ret;
}

static int tun_ptr_peek_len(void *ptr)
{
	if (likely(ptr)) {
		if (tun_is_xdp_frame(ptr)) {
			struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

			return xdpf->len;
		}
		return __skb_array_len_with_tag(ptr);
	} else {
		return 0;
	}
}

static int tun_peek_len(struct socket *sock)
{
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun;
	int ret = 0;

	tun = tun_get(tfile);
	if (!tun)
		return 0;

	ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
	tun_put(tun);

	return ret;
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tun_socket_ops = {
	.peek_len = tun_peek_len,
	.sendmsg = tun_sendmsg,
	.recvmsg = tun_recvmsg,
};

static struct proto tun_proto = {
	.name		= "tun",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct tun_file),
};

static int tun_flags(struct tun_struct *tun)
{
	return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
}

static ssize_t tun_flags_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
	return sprintf(buf, "0x%x\n", tun_flags(tun));
}

static ssize_t owner_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
	return uid_valid(tun->owner) ?
		sprintf(buf, "%u\n",
			from_kuid_munged(current_user_ns(), tun->owner)) :
		sprintf(buf, "-1\n");
}

static ssize_t group_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
	return gid_valid(tun->group) ?
		sprintf(buf, "%u\n",
			from_kgid_munged(current_user_ns(), tun->group)) :
		sprintf(buf, "-1\n");
}

static DEVICE_ATTR_RO(tun_flags);
static DEVICE_ATTR_RO(owner);
static DEVICE_ATTR_RO(group);

static struct attribute *tun_dev_attrs[] = {
	&dev_attr_tun_flags.attr,
	&dev_attr_owner.attr,
	&dev_attr_group.attr,
	NULL
};

static const struct attribute_group tun_attr_group = {
	.attrs = tun_dev_attrs
};
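
/* The group above is hooked into the netdev's sysfs directory by
 * tun_set_iff(); an illustrative session (values depend on how the device
 * was created):
 *
 *	$ cat /sys/class/net/tun0/tun_flags
 *	0x1002
 *	$ cat /sys/class/net/tun0/owner
 *	1000
 *	$ cat /sys/class/net/tun0/group
 *	-1
 */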

static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
{
	struct tun_struct *tun;
	struct tun_file *tfile = file->private_data;
	struct net_device *dev;
	int err;

	if (tfile->detached)
		return -EINVAL;

	if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) {
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!(ifr->ifr_flags & IFF_NAPI) ||
		    (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP)
			return -EINVAL;
	}

	dev = __dev_get_by_name(net, ifr->ifr_name);
	if (dev) {
		if (ifr->ifr_flags & IFF_TUN_EXCL)
			return -EBUSY;
		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
			tun = netdev_priv(dev);
		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
			tun = netdev_priv(dev);
		else
			return -EINVAL;

		if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
		    !!(tun->flags & IFF_MULTI_QUEUE))
			return -EINVAL;

		if (tun_not_capable(tun))
			return -EPERM;
		err = security_tun_dev_open(tun->security);
		if (err < 0)
			return err;

		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
				 ifr->ifr_flags & IFF_NAPI,
				 ifr->ifr_flags & IFF_NAPI_FRAGS, true);
		if (err < 0)
			return err;

		if (tun->flags & IFF_MULTI_QUEUE &&
		    (tun->numqueues + tun->numdisabled > 1)) {
			/* One or more queue has already been attached, no need
			 * to initialize the device again.
			 */
			netdev_state_change(dev);
			return 0;
		}

		tun->flags = (tun->flags & ~TUN_FEATURES) |
			     (ifr->ifr_flags & TUN_FEATURES);

		netdev_state_change(dev);
	} else {
		char *name;
		unsigned long flags = 0;
		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
			     MAX_TAP_QUEUES : 1;

		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		err = security_tun_dev_create();
		if (err < 0)
			return err;

		/* Set dev type */
		if (ifr->ifr_flags & IFF_TUN) {
			/* TUN device */
			flags |= IFF_TUN;
			name = "tun%d";
		} else if (ifr->ifr_flags & IFF_TAP) {
			/* TAP device */
			flags |= IFF_TAP;
			name = "tap%d";
		} else
			return -EINVAL;

		if (*ifr->ifr_name)
			name = ifr->ifr_name;

		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
				       NET_NAME_UNKNOWN, tun_setup, queues,
				       queues);

		if (!dev)
			return -ENOMEM;

		dev_net_set(dev, net);
		dev->rtnl_link_ops = &tun_link_ops;
		dev->ifindex = tfile->ifindex;
		dev->sysfs_groups[0] = &tun_attr_group;

		tun = netdev_priv(dev);
		tun->dev = dev;
		tun->flags = flags;
		tun->txflt.count = 0;
		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

		tun->align = NET_SKB_PAD;
		tun->filter_attached = false;
		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
		tun->rx_batched = 0;
		RCU_INIT_POINTER(tun->steering_prog, NULL);

		tun->ifr = ifr;
		tun->file = file;

		tun_net_initialize(dev);

		err = register_netdevice(tun->dev);
		if (err < 0) {
			free_netdev(dev);
			return err;
		}
		/* free_netdev() won't check refcnt, to avoid race
		 * with dev_put() we need publish tun after registration.
		 */
		rcu_assign_pointer(tfile->tun, tun);
	}

	netif_carrier_on(tun->dev);

	/* Make sure persistent devices do not get stuck in
	 * xoff state.
	 */
	if (netif_running(tun->dev))
		netif_tx_wake_all_queues(tun->dev);

	strcpy(ifr->ifr_name, tun->dev->name);
	return 0;
}
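
/* Userspace counterpart, for reference: tun_set_iff() is reached through
 * ioctl(TUNSETIFF) on /dev/net/tun, along the lines of the classic helper
 * from Documentation/networking/tuntap.rst (error handling trimmed):
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/if.h>
 *	#include <linux/if_tun.h>
 *
 *	int tun_alloc(char *dev)
 *	{
 *		struct ifreq ifr;
 *		int fd;
 *
 *		fd = open("/dev/net/tun", O_RDWR);
 *		if (fd < 0)
 *			return -1;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
 *		if (*dev)
 *			strncpy(ifr.ifr_name, dev, IFNAMSIZ);
 *
 *		if (ioctl(fd, TUNSETIFF, (void *)&ifr) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		strcpy(dev, ifr.ifr_name);	// name actually allocated
 *		return fd;
 *	}
 */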

static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
{
	strcpy(ifr->ifr_name, tun->dev->name);

	ifr->ifr_flags = tun_flags(tun);
}

/* This is like a cut-down ethtool ops, except done via tun fd so no
 * privs required. */
static int set_offload(struct tun_struct *tun, unsigned long arg)
{
	netdev_features_t features = 0;

	if (arg & TUN_F_CSUM) {
		features |= NETIF_F_HW_CSUM;
		arg &= ~TUN_F_CSUM;

		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN) {
				features |= NETIF_F_TSO_ECN;
				arg &= ~TUN_F_TSO_ECN;
			}
			if (arg & TUN_F_TSO4)
				features |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				features |= NETIF_F_TSO6;
			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
		}

		arg &= ~TUN_F_UFO;
	}

	/* This gives the user a way to test for new features in future by
	 * trying to set them. */
	if (arg)
		return -EINVAL;

	tun->set_features = features;
	tun->dev->wanted_features &= ~TUN_USER_FEATURES;
	tun->dev->wanted_features |= features;
	netdev_update_features(tun->dev);

	return 0;
}
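
/* Userspace sketch: offloads are toggled with TUNSETOFFLOAD, e.g. checksum
 * offload plus TSOv4/v6 (flag bits from <linux/if_tun.h>):
 *
 *	unsigned long off = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;
 *
 *	if (ioctl(fd, TUNSETOFFLOAD, off) < 0)
 *		perror("TUNSETOFFLOAD");
 *
 * As the check above implements, any bit this kernel does not recognize
 * fails the call with EINVAL, which is how userspace probes for newly
 * added offload flags.
 */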

static void tun_detach_filter(struct tun_struct *tun, int n)
{
	int i;
	struct tun_file *tfile;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		lock_sock(tfile->socket.sk);
		sk_detach_filter(tfile->socket.sk);
		release_sock(tfile->socket.sk);
	}

	tun->filter_attached = false;
}

static int tun_attach_filter(struct tun_struct *tun)
{
	int i, ret = 0;
	struct tun_file *tfile;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		lock_sock(tfile->socket.sk);
		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (ret) {
			tun_detach_filter(tun, i);
			return ret;
		}
	}

	tun->filter_attached = true;
	return ret;
}

static void tun_set_sndbuf(struct tun_struct *tun)
{
	struct tun_file *tfile;
	int i;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
	}
}

static int tun_set_queue(struct file *file, struct ifreq *ifr)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	int ret = 0;

	rtnl_lock();

	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
		tun = tfile->detached;
		if (!tun) {
			ret = -EINVAL;
			goto unlock;
		}
		ret = security_tun_dev_attach_queue(tun->security);
		if (ret < 0)
			goto unlock;
		ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
				 tun->flags & IFF_NAPI_FRAGS, true);
	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
		tun = rtnl_dereference(tfile->tun);
		if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
			ret = -EINVAL;
		else
			__tun_detach(tfile, false);
	} else
		ret = -EINVAL;

	if (ret >= 0)
		netdev_state_change(tun->dev);

unlock:
	rtnl_unlock();
	return ret;
}
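
/* Userspace sketch: on a device created with IFF_MULTI_QUEUE, a queue's fd
 * can be parked and later re-enabled without closing it:
 *
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_flags = IFF_DETACH_QUEUE;
 *	ioctl(queue_fd, TUNSETQUEUE, &ifr);	// disable this queue
 *	ifr.ifr_flags = IFF_ATTACH_QUEUE;
 *	ioctl(queue_fd, TUNSETQUEUE, &ifr);	// bring it back
 */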

static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p,
			void __user *data)
{
	struct bpf_prog *prog;
	int fd;

	if (copy_from_user(&fd, data, sizeof(fd)))
		return -EFAULT;

	if (fd == -1) {
		prog = NULL;
	} else {
		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
		if (IS_ERR(prog))
			return PTR_ERR(prog);
	}

	return __tun_set_ebpf(tun, prog_p, prog);
}
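
/* Userspace sketch: both TUNSETSTEERINGEBPF and TUNSETFILTEREBPF funnel
 * through this helper. The argument is the fd of a loaded
 * BPF_PROG_TYPE_SOCKET_FILTER program; passing -1 detaches the current one:
 *
 *	int prog_fd = ...;	// from bpf(BPF_PROG_LOAD, ...)
 *
 *	ioctl(tun_fd, TUNSETSTEERINGEBPF, &prog_fd);
 *	prog_fd = -1;
 *	ioctl(tun_fd, TUNSETSTEERINGEBPF, &prog_fd);	// detach
 */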

/* Return correct value for tun->dev->addr_len based on tun->dev->type. */
static unsigned char tun_get_addr_len(unsigned short type)
{
	switch (type) {
	case ARPHRD_IP6GRE:
	case ARPHRD_TUNNEL6:
		return sizeof(struct in6_addr);
	case ARPHRD_IPGRE:
	case ARPHRD_TUNNEL:
	case ARPHRD_SIT:
		return 4;
	case ARPHRD_ETHER:
		return ETH_ALEN;
	case ARPHRD_IEEE802154:
	case ARPHRD_IEEE802154_MONITOR:
		return IEEE802154_EXTENDED_ADDR_LEN;
	case ARPHRD_PHONET_PIPE:
	case ARPHRD_PPP:
	case ARPHRD_NONE:
		return 0;
	case ARPHRD_6LOWPAN:
		return EUI64_ADDR_LEN;
	case ARPHRD_FDDI:
		return FDDI_K_ALEN;
	case ARPHRD_HIPPI:
		return HIPPI_ALEN;
	case ARPHRD_IEEE802:
		return FC_ALEN;
	case ARPHRD_ROSE:
		return ROSE_ADDR_LEN;
	case ARPHRD_NETROM:
		return AX25_ADDR_LEN;
	case ARPHRD_LOCALTLK:
		return LTALK_ALEN;
	default:
		return 0;
	}
}
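
/* This backs the TUNSETLINK handler below: when userspace re-types a
 * device, e.g.
 *
 *	ioctl(tun_fd, TUNSETLINK, ARPHRD_NONE);
 *
 * dev->addr_len must be fixed up to match the new hardware type so that
 * stale, Ethernet-sized addresses are not reported for it.
 */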

static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg, int ifreq_len)
{
	struct tun_file *tfile = file->private_data;
	struct net *net = sock_net(&tfile->sk);
	struct tun_struct *tun;
	void __user *argp = (void __user *)arg;
	unsigned int ifindex, carrier;
	struct ifreq ifr;
	kuid_t owner;
	kgid_t group;
	int sndbuf;
	int vnet_hdr_sz;
	int le;
	int ret;
	bool do_notify = false;

	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
	    (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
		if (copy_from_user(&ifr, argp, ifreq_len))
			return -EFAULT;
	} else {
		memset(&ifr, 0, sizeof(ifr));
	}
	if (cmd == TUNGETFEATURES) {
		/* Currently this just means: "what IFF flags are valid?".
		 * This is needed because we never checked for invalid flags on
		 * TUNSETIFF.
		 */
		return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
				(unsigned int __user *)argp);
	} else if (cmd == TUNSETQUEUE) {
		return tun_set_queue(file, &ifr);
	} else if (cmd == SIOCGSKNS) {
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		return open_related_ns(&net->ns, get_net_ns);
	}

	rtnl_lock();

	tun = tun_get(tfile);
	if (cmd == TUNSETIFF) {
		ret = -EEXIST;
		if (tun)
			goto unlock;

		ifr.ifr_name[IFNAMSIZ-1] = '\0';

		ret = tun_set_iff(net, file, &ifr);

		if (ret)
			goto unlock;

		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		goto unlock;
	}
	if (cmd == TUNSETIFINDEX) {
		ret = -EPERM;
		if (tun)
			goto unlock;

		ret = -EFAULT;
		if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
			goto unlock;

		ret = 0;
		tfile->ifindex = ifindex;
		goto unlock;
	}

	ret = -EBADFD;
	if (!tun)
		goto unlock;

	netif_info(tun, drv, tun->dev, "tun_chr_ioctl cmd %u\n", cmd);

	net = dev_net(tun->dev);
	ret = 0;
	switch (cmd) {
	case TUNGETIFF:
		tun_get_iff(tun, &ifr);

		if (tfile->detached)
			ifr.ifr_flags |= IFF_DETACH_QUEUE;
		if (!tfile->socket.sk->sk_filter)
			ifr.ifr_flags |= IFF_NOFILTER;

		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		break;

	case TUNSETNOCSUM:
		/* Disable/Enable checksum */

		/* [unimplemented] */
		netif_info(tun, drv, tun->dev, "ignored: set checksum %s\n",
			   arg ? "disabled" : "enabled");
		break;

	case TUNSETPERSIST:
		/* Disable/Enable persist mode. Keep an extra reference to the
		 * module to prevent the module being unprobed.
		 */
		if (arg && !(tun->flags & IFF_PERSIST)) {
			tun->flags |= IFF_PERSIST;
			__module_get(THIS_MODULE);
			do_notify = true;
		}
		if (!arg && (tun->flags & IFF_PERSIST)) {
			tun->flags &= ~IFF_PERSIST;
			module_put(THIS_MODULE);
			do_notify = true;
		}

		netif_info(tun, drv, tun->dev, "persist %s\n",
			   arg ? "enabled" : "disabled");
		break;

	case TUNSETOWNER:
		/* Set owner of the device */
		owner = make_kuid(current_user_ns(), arg);
		if (!uid_valid(owner)) {
			ret = -EINVAL;
			break;
		}
		tun->owner = owner;
		do_notify = true;
		netif_info(tun, drv, tun->dev, "owner set to %u\n",
			   from_kuid(&init_user_ns, tun->owner));
		break;

	case TUNSETGROUP:
		/* Set group of the device */
		group = make_kgid(current_user_ns(), arg);
		if (!gid_valid(group)) {
			ret = -EINVAL;
			break;
		}
		tun->group = group;
		do_notify = true;
		netif_info(tun, drv, tun->dev, "group set to %u\n",
			   from_kgid(&init_user_ns, tun->group));
		break;

	case TUNSETLINK:
		/* Only allow setting the type when the interface is down */
		if (tun->dev->flags & IFF_UP) {
			netif_info(tun, drv, tun->dev,
				   "Linktype set failed because interface is up\n");
			ret = -EBUSY;
		} else {
			ret = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
						       tun->dev);
			ret = notifier_to_errno(ret);
			if (ret) {
				netif_info(tun, drv, tun->dev,
					   "Refused to change device type\n");
				break;
			}
			tun->dev->type = (int) arg;
			tun->dev->addr_len = tun_get_addr_len(tun->dev->type);
			netif_info(tun, drv, tun->dev, "linktype set to %d\n",
				   tun->dev->type);
			call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
						 tun->dev);
		}
		break;

	case TUNSETDEBUG:
		tun->msg_enable = (u32)arg;
		break;

	case TUNSETOFFLOAD:
		ret = set_offload(tun, arg);
		break;

	case TUNSETTXFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = update_filter(&tun->txflt, (void __user *)arg);
		break;

	case SIOCGIFHWADDR:
		/* Get hw address */
		dev_get_mac_address(&ifr.ifr_hwaddr, net, tun->dev->name);
		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		break;

	case SIOCSIFHWADDR:
		/* Set hw address */
		ret = dev_set_mac_address_user(tun->dev, &ifr.ifr_hwaddr, NULL);
		break;

	case TUNGETSNDBUF:
		sndbuf = tfile->socket.sk->sk_sndbuf;
		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
			ret = -EFAULT;
		break;

	case TUNSETSNDBUF:
		if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
			ret = -EFAULT;
			break;
		}
		if (sndbuf <= 0) {
			ret = -EINVAL;
			break;
		}

		tun->sndbuf = sndbuf;
		tun_set_sndbuf(tun);
		break;

	case TUNGETVNETHDRSZ:
		vnet_hdr_sz = tun->vnet_hdr_sz;
		if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
			ret = -EFAULT;
		break;

	case TUNSETVNETHDRSZ:
		if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
			ret = -EFAULT;
			break;
		}
		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
			ret = -EINVAL;
			break;
		}

		tun->vnet_hdr_sz = vnet_hdr_sz;
		break;

	case TUNGETVNETLE:
		le = !!(tun->flags & TUN_VNET_LE);
		if (put_user(le, (int __user *)argp))
			ret = -EFAULT;
		break;

	case TUNSETVNETLE:
		if (get_user(le, (int __user *)argp)) {
			ret = -EFAULT;
			break;
		}
		if (le)
			tun->flags |= TUN_VNET_LE;
		else
			tun->flags &= ~TUN_VNET_LE;
		break;

	case TUNGETVNETBE:
		ret = tun_get_vnet_be(tun, argp);
		break;

	case TUNSETVNETBE:
		ret = tun_set_vnet_be(tun, argp);
		break;

	case TUNATTACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = -EFAULT;
		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
			break;

		ret = tun_attach_filter(tun);
		break;

	case TUNDETACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = 0;
		tun_detach_filter(tun, tun->numqueues);
		break;

	case TUNGETFILTER:
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = -EFAULT;
		if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
			break;
		ret = 0;
		break;

	case TUNSETSTEERINGEBPF:
		ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
		break;

	case TUNSETFILTEREBPF:
		ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
		break;

	case TUNSETCARRIER:
		ret = -EFAULT;
		if (copy_from_user(&carrier, argp, sizeof(carrier)))
			goto unlock;

		ret = tun_net_change_carrier(tun->dev, (bool)carrier);
		break;

	case TUNGETDEVNETNS:
		ret = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto unlock;
		ret = open_related_ns(&net->ns, get_net_ns);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	if (do_notify)
		netdev_state_change(tun->dev);

unlock:
	rtnl_unlock();
	if (tun)
		tun_put(tun);
	return ret;
}

static long tun_chr_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct ifreq));
}

#ifdef CONFIG_COMPAT
static long tun_chr_compat_ioctl(struct file *file,
				 unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case TUNSETIFF:
	case TUNGETIFF:
	case TUNSETTXFILTER:
	case TUNGETSNDBUF:
	case TUNSETSNDBUF:
	case SIOCGIFHWADDR:
	case SIOCSIFHWADDR:
		arg = (unsigned long)compat_ptr(arg);
		break;
	default:
		arg = (compat_ulong_t)arg;
		break;
	}

	/*
	 * compat_ifreq is shorter than ifreq, so we must not access beyond
	 * the end of that structure. All fields that are used in this
	 * driver are compatible though, we don't need to convert the
	 * contents.
	 */
	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
}
#endif /* CONFIG_COMPAT */

static int tun_chr_fasync(int fd, struct file *file, int on)
{
	struct tun_file *tfile = file->private_data;
	int ret;

	ret = fasync_helper(fd, file, on, &tfile->fasync);
	if (ret < 0)
		goto out;

	if (on) {
		__f_setown(file, task_pid(current), PIDTYPE_TGID, 0);
		tfile->flags |= TUN_FASYNC;
	} else
		tfile->flags &= ~TUN_FASYNC;
	ret = 0;
out:
	return ret;
}

static int tun_chr_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct tun_file *tfile;

	tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					    &tun_proto, 0);
	if (!tfile)
		return -ENOMEM;
	if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
		sk_free(&tfile->sk);
		return -ENOMEM;
	}

	mutex_init(&tfile->napi_mutex);
	RCU_INIT_POINTER(tfile->tun, NULL);
	tfile->flags = 0;
	tfile->ifindex = 0;

	init_waitqueue_head(&tfile->socket.wq.wait);

	tfile->socket.file = file;
	tfile->socket.ops = &tun_socket_ops;

	sock_init_data(&tfile->socket, &tfile->sk);

	tfile->sk.sk_write_space = tun_sock_write_space;
	tfile->sk.sk_sndbuf = INT_MAX;

	file->private_data = tfile;
	INIT_LIST_HEAD(&tfile->next);

	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);

	return 0;
}

static int tun_chr_close(struct inode *inode, struct file *file)
{
	struct tun_file *tfile = file->private_data;

	tun_detach(tfile, true);

	return 0;
}

#ifdef CONFIG_PROC_FS
static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));

	rtnl_lock();
	tun = tun_get(tfile);
	if (tun)
		tun_get_iff(tun, &ifr);
	rtnl_unlock();

	if (tun)
		tun_put(tun);

	seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
}
#endif
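
/* With CONFIG_PROC_FS this makes the attached interface name visible in
 * the fd's fdinfo; illustrative output (other fields are the generic
 * fdinfo ones):
 *
 *	$ cat /proc/<pid>/fdinfo/<fd>
 *	...
 *	iff:	tun0
 */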

static const struct file_operations tun_fops = {
	.owner	= THIS_MODULE,
	.llseek = no_llseek,
	.read_iter  = tun_chr_read_iter,
	.write_iter = tun_chr_write_iter,
	.poll	= tun_chr_poll,
	.unlocked_ioctl	= tun_chr_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = tun_chr_compat_ioctl,
#endif
	.open	= tun_chr_open,
	.release = tun_chr_close,
	.fasync = tun_chr_fasync,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = tun_chr_show_fdinfo,
#endif
};

static struct miscdevice tun_miscdev = {
	.minor = TUN_MINOR,
	.name = "tun",
	.nodename = "net/tun",
	.fops = &tun_fops,
};

/* ethtool interface */

static void tun_default_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
	cmd->base.speed		= SPEED_10;
	cmd->base.duplex	= DUPLEX_FULL;
	cmd->base.port		= PORT_TP;
	cmd->base.phy_address	= 0;
	cmd->base.autoneg	= AUTONEG_DISABLE;
}

static int tun_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct tun_struct *tun = netdev_priv(dev);

	memcpy(cmd, &tun->link_ksettings, sizeof(*cmd));
	return 0;
}

static int tun_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct tun_struct *tun = netdev_priv(dev);

	memcpy(&tun->link_ksettings, cmd, sizeof(*cmd));
	return 0;
}

static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tun_struct *tun = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
		break;
	case IFF_TAP:
		strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
		break;
	}
}

static u32 tun_get_msglevel(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	return tun->msg_enable;
}

static void tun_set_msglevel(struct net_device *dev, u32 value)
{
	struct tun_struct *tun = netdev_priv(dev);

	tun->msg_enable = value;
}

static int tun_get_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	struct tun_struct *tun = netdev_priv(dev);

	ec->rx_max_coalesced_frames = tun->rx_batched;

	return 0;
}

static int tun_set_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
		tun->rx_batched = NAPI_POLL_WEIGHT;
	else
		tun->rx_batched = ec->rx_max_coalesced_frames;

	return 0;
}
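
/* rx_batched maps onto ethtool's rx-frames coalescing parameter and is
 * clamped to NAPI_POLL_WEIGHT above, so from userspace:
 *
 *	$ ethtool -C tun0 rx-frames 32
 *
 * enables batching of up to 32 received frames before they are flushed up
 * the stack; rx-frames 0 disables batching again.
 */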

static const struct ethtool_ops tun_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_MAX_FRAMES,
	.get_drvinfo	= tun_get_drvinfo,
	.get_msglevel	= tun_get_msglevel,
	.set_msglevel	= tun_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_coalesce	= tun_get_coalesce,
	.set_coalesce	= tun_set_coalesce,
	.get_link_ksettings = tun_get_link_ksettings,
	.set_link_ksettings = tun_set_link_ksettings,
};

static int tun_queue_resize(struct tun_struct *tun)
{
	struct net_device *dev = tun->dev;
	struct tun_file *tfile;
	struct ptr_ring **rings;
	int n = tun->numqueues + tun->numdisabled;
	int ret, i;

	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
	if (!rings)
		return -ENOMEM;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		rings[i] = &tfile->tx_ring;
	}
	list_for_each_entry(tfile, &tun->disabled, next)
		rings[i++] = &tfile->tx_ring;

	ret = ptr_ring_resize_multiple(rings, n,
				       dev->tx_queue_len, GFP_KERNEL,
				       tun_ptr_free);

	kfree(rings);
	return ret;
}

static int tun_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct tun_struct *tun = netdev_priv(dev);
	int i;

	if (dev->rtnl_link_ops != &tun_link_ops)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGE_TX_QUEUE_LEN:
		if (tun_queue_resize(tun))
			return NOTIFY_BAD;
		break;
	case NETDEV_UP:
		for (i = 0; i < tun->numqueues; i++) {
			struct tun_file *tfile;

			tfile = rtnl_dereference(tun->tfiles[i]);
			tfile->socket.sk->sk_write_space(tfile->socket.sk);
		}
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block tun_notifier_block __read_mostly = {
	.notifier_call	= tun_device_event,
};

static int __init tun_init(void)
{
	int ret = 0;

	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);

	ret = rtnl_link_register(&tun_link_ops);
	if (ret) {
		pr_err("Can't register link_ops\n");
		goto err_linkops;
	}

	ret = misc_register(&tun_miscdev);
	if (ret) {
		pr_err("Can't register misc device %d\n", TUN_MINOR);
		goto err_misc;
	}

	ret = register_netdevice_notifier(&tun_notifier_block);
	if (ret) {
		pr_err("Can't register netdevice notifier\n");
		goto err_notifier;
	}

	return 0;

err_notifier:
	misc_deregister(&tun_miscdev);
err_misc:
	rtnl_link_unregister(&tun_link_ops);
err_linkops:
	return ret;
}

static void tun_cleanup(void)
{
	misc_deregister(&tun_miscdev);
	rtnl_link_unregister(&tun_link_ops);
	unregister_netdevice_notifier(&tun_notifier_block);
}

/* Get an underlying socket object from tun file.  Returns error unless file is
 * attached to a device.  The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
 * managing completion of these operations.
 */
struct socket *tun_get_socket(struct file *file)
{
	struct tun_file *tfile;

	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->socket;
}
EXPORT_SYMBOL_GPL(tun_get_socket);
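
/* Usage sketch for the export above: an in-kernel user such as vhost-net
 * resolves the socket behind a tun fd roughly like
 *
 *	struct file *file = fget(fd);
 *	struct socket *sock = tun_get_socket(file);
 *
 *	if (IS_ERR(sock))
 *		...	// not a tun file, try other backends
 *
 * and then drives it with sock_sendmsg()/sock_recvmsg().
 */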

struct ptr_ring *tun_get_tx_ring(struct file *file)
{
	struct tun_file *tfile;

	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->tx_ring;
}
EXPORT_SYMBOL_GPL(tun_get_tx_ring);

module_init(tun_init);
module_exit(tun_cleanup);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(TUN_MINOR);
MODULE_ALIAS("devname:net/tun");