1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19#include <linux/module.h>
20#include <linux/string.h>
21#include <linux/list.h>
22#include <linux/rculist.h>
23#include <linux/uaccess.h>
24
25#include <linux/kernel.h>
26#include <linux/spinlock.h>
27#include <linux/kthread.h>
28#include <linux/sched.h>
29#include <linux/slab.h>
30#include <linux/errno.h>
31#include <linux/jiffies.h>
32
33#include <linux/netdevice.h>
34#include <linux/net.h>
35#include <linux/inetdevice.h>
36#include <linux/skbuff.h>
37#include <linux/init.h>
38#include <linux/in.h>
39#include <linux/ip.h>
40#include <linux/udp.h>
41#include <linux/l2tp.h>
42#include <linux/hash.h>
43#include <linux/sort.h>
44#include <linux/file.h>
45#include <linux/nsproxy.h>
46#include <net/net_namespace.h>
47#include <net/netns/generic.h>
48#include <net/dst.h>
49#include <net/ip.h>
50#include <net/udp.h>
51#include <net/udp_tunnel.h>
52#include <net/inet_common.h>
53#include <net/xfrm.h>
54#include <net/protocol.h>
55#include <net/inet6_connection_sock.h>
56#include <net/inet_ecn.h>
57#include <net/ip6_route.h>
58#include <net/ip6_checksum.h>
59
60#include <asm/byteorder.h>
61#include <linux/atomic.h>
62
63#include "l2tp_core.h"
64#include "trace.h"
65
66#define CREATE_TRACE_POINTS
67#include "trace.h"
68
69#define L2TP_DRV_VERSION "V2.0"
70
71
/* L2TPv2 header flag bits (first 16 bits of the header, host order) */
#define L2TP_HDRFLAG_T 0x8000	/* control (Type) message */
#define L2TP_HDRFLAG_L 0x4000	/* Length field present */
#define L2TP_HDRFLAG_S 0x0800	/* sequence numbers (Ns/Nr) present */
#define L2TP_HDRFLAG_O 0x0200	/* Offset field present */
#define L2TP_HDRFLAG_P 0x0100	/* Priority */

/* Version field in the low nibble of the flags word */
#define L2TP_HDR_VER_MASK 0x000F
#define L2TP_HDR_VER_2 0x0002
#define L2TP_HDR_VER_3 0x0003

/* L2TPv3 default L2-specific sublayer: S bit and 24-bit sequence number */
#define L2TP_SLFLAG_S 0x40000000
#define L2TP_SL_SEQ_MASK 0x00ffffff

/* Worst-case space consumed by the L2TP header fields parsed on receive */
#define L2TP_HDR_SIZE_MAX 14

/* Default trace flags */
#define L2TP_DEFAULT_DEBUG_FLAGS 0

/* Private per-skb receive state, carried in skb->cb past the inet control
 * block (which the IP stack still owns while the skb is in flight).
 */
struct l2tp_skb_cb {
	u32 ns;			/* Ns sequence number parsed from the header */
	u16 has_seq;		/* non-zero if the packet carried sequence numbers */
	u16 length;		/* payload length, cached for stats accounting */
	unsigned long expires;	/* jiffies deadline while on the reorder queue */
};

#define L2TP_SKB_CB(skb) ((struct l2tp_skb_cb *)&(skb)->cb[sizeof(struct inet_skb_parm)])
101
/* Workqueue used for deferred tunnel teardown (see l2tp_tunnel_del_work) */
static struct workqueue_struct *l2tp_wq;

/* per-net private data for this module, keyed by l2tp_net_id */
static unsigned int l2tp_net_id;
struct l2tp_net {
	struct list_head l2tp_tunnel_list;	/* all tunnels in this netns; RCU-read, */
	/* ...writers serialized by this lock */
	spinlock_t l2tp_tunnel_list_lock;
	/* netns-global L2TPv3 session hash (sessions are also hashed per-tunnel) */
	struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
	/* protects writers of l2tp_session_hlist; readers use RCU */
	spinlock_t l2tp_session_hlist_lock;
};
114
115#if IS_ENABLED(CONFIG_IPV6)
116static bool l2tp_sk_is_v6(struct sock *sk)
117{
118 return sk->sk_family == PF_INET6 &&
119 !ipv6_addr_v4mapped(&sk->sk_v6_daddr);
120}
121#endif
122
/* Return this module's private per-netns state for @net. */
static inline struct l2tp_net *l2tp_pernet(const struct net *net)
{
	return net_generic(net, l2tp_net_id);
}
127
128
129
130
131
132
/* Session hash global list for L2TPv3.
 * The session_id SHOULD be random according to RFC3931, but several
 * L2TP implementations use incrementing session_ids.  So we do a real
 * hash on the session_id, rather than a simple bitmask.
 */
static inline struct hlist_head *
l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
{
	return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)];
}
138
139
140
141
142
143
144
/* Session hash list within one tunnel.
 * The session_id SHOULD be random according to RFC2661, but several
 * L2TP implementations (Cisco and Microsoft) use incrementing
 * session_ids.  So we do a real hash on the session_id, rather than a
 * simple bitmask.
 */
static inline struct hlist_head *
l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
{
	return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
}
150
/* Called on the final tunnel refcount drop: release the socket reference
 * taken at register time.  The tunnel struct itself is freed later from
 * the socket destructor (kfree_rcu in l2tp_tunnel_destruct).
 */
static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
{
	trace_free_tunnel(tunnel);
	sock_put(tunnel->sock);

}
157
158static void l2tp_session_free(struct l2tp_session *session)
159{
160 trace_free_session(session);
161 if (session->tunnel)
162 l2tp_tunnel_dec_refcount(session->tunnel);
163 kfree(session);
164}
165
166struct l2tp_tunnel *l2tp_sk_to_tunnel(struct sock *sk)
167{
168 struct l2tp_tunnel *tunnel = sk->sk_user_data;
169
170 if (tunnel)
171 if (WARN_ON(tunnel->magic != L2TP_TUNNEL_MAGIC))
172 return NULL;
173
174 return tunnel;
175}
176EXPORT_SYMBOL_GPL(l2tp_sk_to_tunnel);
177
/* Take a reference on a tunnel. */
void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel)
{
	refcount_inc(&tunnel->ref_count);
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_inc_refcount);

/* Drop a tunnel reference; the last put tears the tunnel down. */
void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel)
{
	if (refcount_dec_and_test(&tunnel->ref_count))
		l2tp_tunnel_free(tunnel);
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_dec_refcount);
190
/* Take a reference on a session. */
void l2tp_session_inc_refcount(struct l2tp_session *session)
{
	refcount_inc(&session->ref_count);
}
EXPORT_SYMBOL_GPL(l2tp_session_inc_refcount);

/* Drop a session reference; the last put frees the session. */
void l2tp_session_dec_refcount(struct l2tp_session *session)
{
	if (refcount_dec_and_test(&session->ref_count))
		l2tp_session_free(session);
}
EXPORT_SYMBOL_GPL(l2tp_session_dec_refcount);
203
204
205struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
206{
207 const struct l2tp_net *pn = l2tp_pernet(net);
208 struct l2tp_tunnel *tunnel;
209
210 rcu_read_lock_bh();
211 list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
212 if (tunnel->tunnel_id == tunnel_id &&
213 refcount_inc_not_zero(&tunnel->ref_count)) {
214 rcu_read_unlock_bh();
215
216 return tunnel;
217 }
218 }
219 rcu_read_unlock_bh();
220
221 return NULL;
222}
223EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
224
225struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
226{
227 const struct l2tp_net *pn = l2tp_pernet(net);
228 struct l2tp_tunnel *tunnel;
229 int count = 0;
230
231 rcu_read_lock_bh();
232 list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
233 if (++count > nth &&
234 refcount_inc_not_zero(&tunnel->ref_count)) {
235 rcu_read_unlock_bh();
236 return tunnel;
237 }
238 }
239 rcu_read_unlock_bh();
240
241 return NULL;
242}
243EXPORT_SYMBOL_GPL(l2tp_tunnel_get_nth);
244
245struct l2tp_session *l2tp_tunnel_get_session(struct l2tp_tunnel *tunnel,
246 u32 session_id)
247{
248 struct hlist_head *session_list;
249 struct l2tp_session *session;
250
251 session_list = l2tp_session_id_hash(tunnel, session_id);
252
253 read_lock_bh(&tunnel->hlist_lock);
254 hlist_for_each_entry(session, session_list, hlist)
255 if (session->session_id == session_id) {
256 l2tp_session_inc_refcount(session);
257 read_unlock_bh(&tunnel->hlist_lock);
258
259 return session;
260 }
261 read_unlock_bh(&tunnel->hlist_lock);
262
263 return NULL;
264}
265EXPORT_SYMBOL_GPL(l2tp_tunnel_get_session);
266
267struct l2tp_session *l2tp_session_get(const struct net *net, u32 session_id)
268{
269 struct hlist_head *session_list;
270 struct l2tp_session *session;
271
272 session_list = l2tp_session_id_hash_2(l2tp_pernet(net), session_id);
273
274 rcu_read_lock_bh();
275 hlist_for_each_entry_rcu(session, session_list, global_hlist)
276 if (session->session_id == session_id) {
277 l2tp_session_inc_refcount(session);
278 rcu_read_unlock_bh();
279
280 return session;
281 }
282 rcu_read_unlock_bh();
283
284 return NULL;
285}
286EXPORT_SYMBOL_GPL(l2tp_session_get);
287
288struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth)
289{
290 int hash;
291 struct l2tp_session *session;
292 int count = 0;
293
294 read_lock_bh(&tunnel->hlist_lock);
295 for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
296 hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
297 if (++count > nth) {
298 l2tp_session_inc_refcount(session);
299 read_unlock_bh(&tunnel->hlist_lock);
300 return session;
301 }
302 }
303 }
304
305 read_unlock_bh(&tunnel->hlist_lock);
306
307 return NULL;
308}
309EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
310
311
312
313
314struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
315 const char *ifname)
316{
317 struct l2tp_net *pn = l2tp_pernet(net);
318 int hash;
319 struct l2tp_session *session;
320
321 rcu_read_lock_bh();
322 for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
323 hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
324 if (!strcmp(session->ifname, ifname)) {
325 l2tp_session_inc_refcount(session);
326 rcu_read_unlock_bh();
327
328 return session;
329 }
330 }
331 }
332
333 rcu_read_unlock_bh();
334
335 return NULL;
336}
337EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
338
/* Register a session in its parent tunnel (and, for L2TPv3, in the
 * netns-global hash).  Fails with -ENODEV if the tunnel is shutting
 * down, or -EEXIST on a duplicate session id.  On success the session
 * holds a new reference on the tunnel.
 * Lock order: tunnel->hlist_lock (write) outside pn->l2tp_session_hlist_lock.
 */
int l2tp_session_register(struct l2tp_session *session,
			  struct l2tp_tunnel *tunnel)
{
	struct l2tp_session *session_walk;
	struct hlist_head *g_head;
	struct hlist_head *head;
	struct l2tp_net *pn;
	int err;

	head = l2tp_session_id_hash(tunnel, session->session_id);

	write_lock_bh(&tunnel->hlist_lock);
	/* Tunnel is being closed; don't accept new sessions */
	if (!tunnel->acpt_newsess) {
		err = -ENODEV;
		goto err_tlock;
	}

	/* Reject a duplicate session id within this tunnel */
	hlist_for_each_entry(session_walk, head, hlist)
		if (session_walk->session_id == session->session_id) {
			err = -EEXIST;
			goto err_tlock;
		}

	if (tunnel->version == L2TP_HDR_VER_3) {
		pn = l2tp_pernet(tunnel->l2tp_net);
		g_head = l2tp_session_id_hash_2(pn, session->session_id);

		spin_lock_bh(&pn->l2tp_session_hlist_lock);

		/* IP-encap demuxes purely on session id, so an id clash with
		 * any IP-encap session in the netns would be ambiguous.
		 * UDP-encap sessions can share ids across tunnels.
		 */
		hlist_for_each_entry(session_walk, g_head, global_hlist)
			if (session_walk->session_id == session->session_id &&
			    (session_walk->tunnel->encap == L2TP_ENCAPTYPE_IP ||
			     tunnel->encap == L2TP_ENCAPTYPE_IP)) {
				err = -EEXIST;
				goto err_tlock_pnlock;
			}

		l2tp_tunnel_inc_refcount(tunnel);
		hlist_add_head_rcu(&session->global_hlist, g_head);

		spin_unlock_bh(&pn->l2tp_session_hlist_lock);
	} else {
		l2tp_tunnel_inc_refcount(tunnel);
	}

	hlist_add_head(&session->hlist, head);
	write_unlock_bh(&tunnel->hlist_lock);

	trace_register_session(session);

	return 0;

err_tlock_pnlock:
	spin_unlock_bh(&pn->l2tp_session_hlist_lock);
err_tlock:
	write_unlock_bh(&tunnel->hlist_lock);

	return err;
}
EXPORT_SYMBOL_GPL(l2tp_session_register);
402
403
404
405
406
407
408
409
410static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
411{
412 struct sk_buff *skbp;
413 struct sk_buff *tmp;
414 u32 ns = L2TP_SKB_CB(skb)->ns;
415
416 spin_lock_bh(&session->reorder_q.lock);
417 skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
418 if (L2TP_SKB_CB(skbp)->ns > ns) {
419 __skb_queue_before(&session->reorder_q, skbp, skb);
420 atomic_long_inc(&session->stats.rx_oos_packets);
421 goto out;
422 }
423 }
424
425 __skb_queue_tail(&session->reorder_q, skb);
426
427out:
428 spin_unlock_bh(&session->reorder_q.lock);
429}
430
431
432
/* Deliver one dequeued skb to the session's receive handler, updating
 * rx stats and the expected sequence number on the way.
 */
static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	int length = L2TP_SKB_CB(skb)->length;

	/* We're about to hand the skb on, so release any resource it
	 * still holds on its current owner (e.g. a socket rx buffer)
	 * before recv_skb requeues it.
	 */
	skb_orphan(skb);

	atomic_long_inc(&tunnel->stats.rx_packets);
	atomic_long_add(length, &tunnel->stats.rx_bytes);
	atomic_long_inc(&session->stats.rx_packets);
	atomic_long_add(length, &session->stats.rx_bytes);

	if (L2TP_SKB_CB(skb)->has_seq) {
		/* Bump our Nr (expected next Ns), wrapping at nr_max */
		session->nr++;
		session->nr &= session->nr_max;
		trace_session_seqnum_update(session);
	}

	/* Hand the packet to the pseudowire-specific handler, if any */
	if (session->recv_skb)
		(*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
	else
		kfree_skb(skb);
}
461
462
463
464
/* Dequeue and deliver all in-sequence (or expired) packets from the
 * session reorder queue.  Delivery happens with the queue lock
 * dropped, so the walk is restarted from scratch after each delivery.
 */
static void l2tp_recv_dequeue(struct l2tp_session *session)
{
	struct sk_buff *skb;
	struct sk_buff *tmp;

	/* If the pkt at the head of the queue has the nr that we
	 * expect to send up next, dequeue it and any other packets
	 * that are in sequence.
	 */
start:
	spin_lock_bh(&session->reorder_q.lock);
	skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
		struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);

		/* Packet sat too long waiting for its gap to fill: drop it
		 * and resynchronize our Nr from the next packet delivered.
		 */
		if (time_after(jiffies, cb->expires)) {
			atomic_long_inc(&session->stats.rx_seq_discards);
			atomic_long_inc(&session->stats.rx_errors);
			trace_session_pkt_expired(session, cb->ns);
			session->reorder_skip = 1;
			__skb_unlink(skb, &session->reorder_q);
			kfree_skb(skb);
			continue;
		}

		if (cb->has_seq) {
			if (session->reorder_skip) {
				/* jump Nr forward to this packet's Ns */
				session->reorder_skip = 0;
				session->nr = cb->ns;
				trace_session_seqnum_reset(session);
			}
			if (cb->ns != session->nr)
				goto out;	/* gap not yet filled; stop here */
		}
		__skb_unlink(skb, &session->reorder_q);

		/* Process the skb. We release the queue lock while we
		 * do so to let other contexts process the queue.
		 */
		spin_unlock_bh(&session->reorder_q.lock);
		l2tp_recv_dequeue_skb(session, skb);
		goto start;
	}

out:
	spin_unlock_bh(&session->reorder_q.lock);
}
512
513static int l2tp_seq_check_rx_window(struct l2tp_session *session, u32 nr)
514{
515 u32 nws;
516
517 if (nr >= session->nr)
518 nws = nr - session->nr;
519 else
520 nws = (session->nr_max + 1) - (session->nr - nr);
521
522 return nws < session->nr_window_size;
523}
524
525
526
527
/* Handle a sequenced data packet: either queue it for delivery or
 * decide to discard it.  Returns 0 if the skb was consumed (queued),
 * 1 if the caller should discard it.
 */
static int l2tp_recv_data_seq(struct l2tp_session *session, struct sk_buff *skb)
{
	struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);

	if (!l2tp_seq_check_rx_window(session, cb->ns)) {
		/* Packet sequence number is outside the acceptable
		 * receive window: discard.
		 */
		trace_session_pkt_outside_rx_window(session, cb->ns);
		goto discard;
	}

	if (session->reorder_timeout != 0) {
		/* Packet reordering enabled. Add packet to the sorted
		 * reorder queue; delivery waits up to reorder_timeout.
		 */
		l2tp_recv_queue_skb(session, skb);
		goto out;
	}

	/* Packet reordering disabled. Discard out-of-sequence packets,
	 * but track a run of consecutive OOS arrivals: if the peer has
	 * clearly moved on (more than nr_oos_count_max in a row),
	 * resync by accepting the new sequence position.
	 */
	if (cb->ns == session->nr) {
		skb_queue_tail(&session->reorder_q, skb);
	} else {
		u32 nr_oos = cb->ns;
		u32 nr_next = (session->nr_oos + 1) & session->nr_max;

		/* count only *consecutive* out-of-sequence packets */
		if (nr_oos == nr_next)
			session->nr_oos_count++;
		else
			session->nr_oos_count = 0;

		session->nr_oos = nr_oos;
		if (session->nr_oos_count > session->nr_oos_count_max) {
			/* too many in a row: resync on next delivery */
			session->reorder_skip = 1;
		}
		if (!session->reorder_skip) {
			atomic_long_inc(&session->stats.rx_seq_discards);
			trace_session_pkt_oos(session, cb->ns);
			goto discard;
		}
		skb_queue_tail(&session->reorder_q, skb);
	}

out:
	return 0;

discard:
	return 1;
}
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
/* Core receive path shared by UDP and IP encapsulations.
 * @ptr points just past the fixed L2TP header fields already parsed by
 * the caller; @optr points at the start of the L2TP header; @length is
 * the payload length for stats.  Consumes the skb: it is either queued
 * for delivery or freed.
 */
void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
		      unsigned char *ptr, unsigned char *optr, u16 hdrflags,
		      int length)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	int offset;

	/* Validate and skip the L2TPv3 cookie, if configured */
	if (session->peer_cookie_len > 0) {
		if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
			pr_debug_ratelimited("%s: cookie mismatch (%u/%u). Discarding.\n",
					     tunnel->name, tunnel->tunnel_id,
					     session->session_id);
			atomic_long_inc(&session->stats.rx_cookie_discards);
			goto discard;
		}
		ptr += session->peer_cookie_len;
	}

	/* Parse sequence numbers.  For v2 they live in the header when
	 * the S flag is set (Ns then Nr, 16 bits each); for v3 they are
	 * in the default L2-specific sublayer (S bit + 24-bit Ns).
	 */
	L2TP_SKB_CB(skb)->has_seq = 0;
	if (tunnel->version == L2TP_HDR_VER_2) {
		if (hdrflags & L2TP_HDRFLAG_S) {
			/* Store L2TP info in the skb to ensure consistent state */
			L2TP_SKB_CB(skb)->ns = ntohs(*(__be16 *)ptr);
			L2TP_SKB_CB(skb)->has_seq = 1;
			ptr += 2;
			/* skip Nr; we don't use the peer's view of our Ns */
			ptr += 2;

		}
	} else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
		u32 l2h = ntohl(*(__be32 *)ptr);

		if (l2h & 0x40000000) {
			/* S bit set: low 24 bits carry Ns */
			L2TP_SKB_CB(skb)->ns = l2h & 0x00ffffff;
			L2TP_SKB_CB(skb)->has_seq = 1;
		}
		ptr += 4;
	}

	if (L2TP_SKB_CB(skb)->has_seq) {
		/* Received a packet with sequence numbers.  If we're
		 * acting as LAC and hadn't been sending sequence numbers,
		 * the peer (LNS) has enabled them: follow suit.
		 */
		if (!session->lns_mode && !session->send_seq) {
			trace_session_seqnum_lns_enable(session);
			session->send_seq = 1;
			l2tp_session_set_header_len(session, tunnel->version);
		}
	} else {
		/* No sequence numbers.  If configured to require them,
		 * discard.
		 */
		if (session->recv_seq) {
			pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
					     session->name);
			atomic_long_inc(&session->stats.rx_seq_discards);
			goto discard;
		}

		/* If we're the LAC and the LNS stopped sending sequence
		 * numbers, stop sending them too.  If we're the LNS and
		 * we're sending them, a data packet without them is a
		 * protocol violation: discard.
		 */
		if (!session->lns_mode && session->send_seq) {
			trace_session_seqnum_lns_disable(session);
			session->send_seq = 0;
			l2tp_session_set_header_len(session, tunnel->version);
		} else if (session->send_seq) {
			pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
					     session->name);
			atomic_long_inc(&session->stats.rx_seq_discards);
			goto discard;
		}
	}

	/* Session data offset is defined only for L2TPv2 and is
	 * indicated by an optional 16-bit offset field.
	 */
	if (tunnel->version == L2TP_HDR_VER_2) {
		/* If offset bit set, skip it. */
		if (hdrflags & L2TP_HDRFLAG_O) {
			offset = ntohs(*(__be16 *)ptr);
			ptr += 2 + offset;
		}
	}

	offset = ptr - optr;
	if (!pskb_may_pull(skb, offset))
		goto discard;

	__skb_pull(skb, offset);

	/* Prepare skb for adding to the session's reorder_q.  Hold the
	 * session's reorder_q spinlock while so that no other context
	 * can examine the cb fields in a half-set state.
	 */
	L2TP_SKB_CB(skb)->length = length;
	L2TP_SKB_CB(skb)->expires = jiffies +
		(session->reorder_timeout ? session->reorder_timeout : HZ);

	/* Sequenced packets go through window/reorder handling;
	 * unsequenced ones are delivered in arrival order.
	 */
	if (L2TP_SKB_CB(skb)->has_seq) {
		if (l2tp_recv_data_seq(session, skb))
			goto discard;
	} else {
		/* No sequence numbers. Add the skb to the tail of the
		 * reorder queue. This ensures always-in-order delivery
		 * relative to any queued predecessors.
		 */
		skb_queue_tail(&session->reorder_q, skb);
	}

	/* Try to dequeue as many skbs from reorder_q as we can. */
	l2tp_recv_dequeue(session);

	return;

discard:
	atomic_long_inc(&session->stats.rx_errors);
	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(l2tp_recv_common);
777
778
779
780static void l2tp_session_queue_purge(struct l2tp_session *session)
781{
782 struct sk_buff *skb = NULL;
783
784 while ((skb = skb_dequeue(&session->reorder_q))) {
785 atomic_long_inc(&session->stats.rx_errors);
786 kfree_skb(skb);
787 }
788}
789
790
791
792
793
794
795
/* Parse a UDP-encapsulated L2TP packet and dispatch it to its session.
 * Returns 0 if the skb was consumed here, or 1 to tell the caller to
 * pass the (restored) UDP packet back up to the regular UDP stack,
 * e.g. for control packets and unknown sessions handled in userspace.
 */
static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
{
	struct l2tp_session *session = NULL;
	unsigned char *ptr, *optr;
	u16 hdrflags;
	u32 tunnel_id, session_id;
	u16 version;
	int length;

	/* UDP has verified checksum */

	/* UDP always verifies the packet length. */
	__skb_pull(skb, sizeof(struct udphdr));

	/* Short packet? */
	if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
		pr_debug_ratelimited("%s: recv short packet (len=%d)\n",
				     tunnel->name, skb->len);
		goto invalid;
	}

	/* Point to L2TP header */
	optr = skb->data;
	ptr = skb->data;

	/* Get L2TP header flags */
	hdrflags = ntohs(*(__be16 *)ptr);

	/* Check protocol version */
	version = hdrflags & L2TP_HDR_VER_MASK;
	if (version != tunnel->version) {
		pr_debug_ratelimited("%s: recv protocol version mismatch: got %d expected %d\n",
				     tunnel->name, version, tunnel->version);
		goto invalid;
	}

	/* Get length of L2TP packet */
	length = skb->len;

	/* If type is control packet, it is handled by userspace. */
	if (hdrflags & L2TP_HDRFLAG_T)
		goto pass;

	/* Skip flags */
	ptr += 2;

	if (tunnel->version == L2TP_HDR_VER_2) {
		/* If length is present, skip it */
		if (hdrflags & L2TP_HDRFLAG_L)
			ptr += 2;

		/* Extract tunnel and session ID */
		tunnel_id = ntohs(*(__be16 *)ptr);
		ptr += 2;
		session_id = ntohs(*(__be16 *)ptr);
		ptr += 2;
	} else {
		/* v3: skip reserved field; session id is 32 bits and the
		 * tunnel is identified by the socket itself.
		 */
		ptr += 2;
		tunnel_id = tunnel->tunnel_id;
		session_id = ntohl(*(__be32 *)ptr);
		ptr += 4;
	}

	/* Find the session context */
	session = l2tp_tunnel_get_session(tunnel, session_id);
	if (!session || !session->recv_skb) {
		if (session)
			l2tp_session_dec_refcount(session);

		/* Not found? Pass to userspace to deal with */
		pr_debug_ratelimited("%s: no session found (%u/%u). Passing up.\n",
				     tunnel->name, tunnel_id, session_id);
		goto pass;
	}

	if (tunnel->version == L2TP_HDR_VER_3 &&
	    l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) {
		l2tp_session_dec_refcount(session);
		goto invalid;
	}

	l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
	l2tp_session_dec_refcount(session);

	return 0;

invalid:
	atomic_long_inc(&tunnel->stats.rx_invalid);

pass:
	/* Put UDP header back */
	__skb_push(skb, sizeof(struct udphdr));

	return 1;
}
891
892
893
894
895
896
897
898int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
899{
900 struct l2tp_tunnel *tunnel;
901
902
903
904
905
906
907
908 tunnel = rcu_dereference_sk_user_data(sk);
909 if (!tunnel)
910 goto pass_up;
911 if (WARN_ON(tunnel->magic != L2TP_TUNNEL_MAGIC))
912 goto pass_up;
913
914 if (l2tp_udp_recv_core(tunnel, skb))
915 goto pass_up;
916
917 return 0;
918
919pass_up:
920 return 1;
921}
922EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);
923
924
925
926
927
928
929
930static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
931{
932 struct l2tp_tunnel *tunnel = session->tunnel;
933 __be16 *bufp = buf;
934 __be16 *optr = buf;
935 u16 flags = L2TP_HDR_VER_2;
936 u32 tunnel_id = tunnel->peer_tunnel_id;
937 u32 session_id = session->peer_session_id;
938
939 if (session->send_seq)
940 flags |= L2TP_HDRFLAG_S;
941
942
943 *bufp++ = htons(flags);
944 *bufp++ = htons(tunnel_id);
945 *bufp++ = htons(session_id);
946 if (session->send_seq) {
947 *bufp++ = htons(session->ns);
948 *bufp++ = 0;
949 session->ns++;
950 session->ns &= 0xffff;
951 trace_session_seqnum_update(session);
952 }
953
954 return bufp - optr;
955}
956
957static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
958{
959 struct l2tp_tunnel *tunnel = session->tunnel;
960 char *bufp = buf;
961 char *optr = bufp;
962
963
964
965
966 if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
967 u16 flags = L2TP_HDR_VER_3;
968 *((__be16 *)bufp) = htons(flags);
969 bufp += 2;
970 *((__be16 *)bufp) = 0;
971 bufp += 2;
972 }
973
974 *((__be32 *)bufp) = htonl(session->peer_session_id);
975 bufp += 4;
976 if (session->cookie_len) {
977 memcpy(bufp, &session->cookie[0], session->cookie_len);
978 bufp += session->cookie_len;
979 }
980 if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
981 u32 l2h = 0;
982
983 if (session->send_seq) {
984 l2h = 0x40000000 | session->ns;
985 session->ns++;
986 session->ns &= 0xffffff;
987 trace_session_seqnum_update(session);
988 }
989
990 *((__be32 *)bufp) = htonl(l2h);
991 bufp += 4;
992 }
993
994 return bufp - optr;
995}
996
997
/* Hand a fully-built tunnel packet to the IP layer for transmission.
 * Returns NET_XMIT_SUCCESS or NET_XMIT_DROP.
 */
static int l2tp_xmit_queue(struct l2tp_tunnel *tunnel, struct sk_buff *skb, struct flowi *fl)
{
	int err;

	skb->ignore_df = 1;	/* allow local fragmentation of the tunnel packet */
	skb_dst_drop(skb);	/* route via the tunnel socket, not any stale dst */
#if IS_ENABLED(CONFIG_IPV6)
	if (l2tp_sk_is_v6(tunnel->sock))
		err = inet6_csk_xmit(tunnel->sock, skb, NULL);
	else
#endif
		err = ip_queue_xmit(tunnel->sock, skb, fl);

	return err >= 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}
1013
/* Encapsulate @skb with the session's L2TP header (and UDP header for
 * UDP encap) and queue it on the tunnel socket.  On success *len is
 * set to the transmitted length for stats.  Consumes the skb on every
 * path.  Returns NET_XMIT_SUCCESS or NET_XMIT_DROP.
 */
static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, unsigned int *len)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	unsigned int data_len = skb->len;
	struct sock *sk = tunnel->sock;
	int headroom, uhlen, udp_len;
	int ret = NET_XMIT_SUCCESS;
	struct inet_sock *inet;
	struct udphdr *uh;

	/* Check that there's enough headroom in the skb to insert IP,
	 * UDP and L2TP headers. If not enough, expand it to
	 * make room. Adjust truesize.
	 */
	uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(*uh) : 0;
	headroom = NET_SKB_PAD + sizeof(struct iphdr) + uhlen + session->hdr_len;
	if (skb_cow_head(skb, headroom)) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	/* Setup L2TP header */
	if (tunnel->version == L2TP_HDR_VER_2)
		l2tp_build_l2tpv2_header(session, __skb_push(skb, session->hdr_len));
	else
		l2tp_build_l2tpv3_header(session, __skb_push(skb, session->hdr_len));

	/* Reset skb netfilter state */
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
	nf_reset_ct(skb);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* socket busy in process context: don't spin, just drop */
		kfree_skb(skb);
		ret = NET_XMIT_DROP;
		goto out_unlock;
	}

	/* The user-space may change the connection status for the user-space
	 * provided socket at run time: we must check it under the socket lock
	 */
	if (tunnel->fd >= 0 && sk->sk_state != TCP_ESTABLISHED) {
		kfree_skb(skb);
		ret = NET_XMIT_DROP;
		goto out_unlock;
	}

	/* Report transmitted length before we add encap header, which keeps
	 * statistics consistent for both UDP and IP encap tx/rx paths.
	 */
	*len = skb->len;

	inet = inet_sk(sk);
	switch (tunnel->encap) {
	case L2TP_ENCAPTYPE_UDP:
		/* Setup UDP header */
		__skb_push(skb, sizeof(*uh));
		skb_reset_transport_header(skb);
		uh = udp_hdr(skb);
		uh->source = inet->inet_sport;
		uh->dest = inet->inet_dport;
		udp_len = uhlen + session->hdr_len + data_len;
		uh->len = htons(udp_len);

		/* Calculate UDP checksum if configured to do so */
#if IS_ENABLED(CONFIG_IPV6)
		if (l2tp_sk_is_v6(sk))
			udp6_set_csum(udp_get_no_check6_tx(sk),
				      skb, &inet6_sk(sk)->saddr,
				      &sk->sk_v6_daddr, udp_len);
		else
#endif
			udp_set_csum(sk->sk_no_check_tx, skb, inet->inet_saddr,
				     inet->inet_daddr, udp_len);
		break;

	case L2TP_ENCAPTYPE_IP:
		break;
	}

	ret = l2tp_xmit_queue(tunnel, skb, &inet->cork.fl);

out_unlock:
	bh_unlock_sock(sk);

	return ret;
}
1102
1103
1104
1105
1106int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb)
1107{
1108 unsigned int len = 0;
1109 int ret;
1110
1111 ret = l2tp_xmit_core(session, skb, &len);
1112 if (ret == NET_XMIT_SUCCESS) {
1113 atomic_long_inc(&session->tunnel->stats.tx_packets);
1114 atomic_long_add(len, &session->tunnel->stats.tx_bytes);
1115 atomic_long_inc(&session->stats.tx_packets);
1116 atomic_long_add(len, &session->stats.tx_bytes);
1117 } else {
1118 atomic_long_inc(&session->tunnel->stats.tx_errors);
1119 atomic_long_inc(&session->stats.tx_errors);
1120 }
1121 return ret;
1122}
1123EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
1124
1125
1126
1127
1128
1129
1130
1131
1132
/* Socket destructor for tunnel sockets: detach the tunnel from the
 * socket, restore and chain to the original destructor, then free the
 * tunnel struct after an RCU grace period (readers may still hold it
 * via rcu_dereference_sk_user_data).
 */
static void l2tp_tunnel_destruct(struct sock *sk)
{
	struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);

	if (!tunnel)
		goto end;

	/* Disable udp encapsulation */
	switch (tunnel->encap) {
	case L2TP_ENCAPTYPE_UDP:
		/* No longer an encapsulation socket. See net/ipv4/udp.c */
		(udp_sk(sk))->encap_type = 0;
		(udp_sk(sk))->encap_rcv = NULL;
		(udp_sk(sk))->encap_destroy = NULL;
		break;
	case L2TP_ENCAPTYPE_IP:
		break;
	}

	/* Remove hooks into tunnel socket */
	sk->sk_destruct = tunnel->old_sk_destruct;
	sk->sk_user_data = NULL;

	/* Call the original destructor */
	if (sk->sk_destruct)
		(*sk->sk_destruct)(sk);

	kfree_rcu(tunnel, rcu);
end:
	return;
}
1164
1165
/* Remove an l2tp session from the per-tunnel hash and, for L2TPv3,
 * from the netns-global hash, waiting out RCU readers of the latter.
 */
static void l2tp_session_unhash(struct l2tp_session *session)
{
	struct l2tp_tunnel *tunnel = session->tunnel;

	/* Remove the session from core hashes */
	if (tunnel) {
		/* Remove from the per-tunnel hash */
		write_lock_bh(&tunnel->hlist_lock);
		hlist_del_init(&session->hlist);
		write_unlock_bh(&tunnel->hlist_lock);

		/* For L2TPv3 we have a per-net hash: remove from there, too */
		if (tunnel->version != L2TP_HDR_VER_2) {
			struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);

			spin_lock_bh(&pn->l2tp_session_hlist_lock);
			hlist_del_init_rcu(&session->global_hlist);
			spin_unlock_bh(&pn->l2tp_session_hlist_lock);
			/* wait for RCU readers before the session can be freed */
			synchronize_rcu();
		}
	}
}
1188
1189
1190
/* When the tunnel is closed, all the attached sessions need to go too.
 * Stops new sessions from registering, then deletes every existing one.
 */
static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
{
	int hash;
	struct hlist_node *walk;
	struct hlist_node *tmp;
	struct l2tp_session *session;

	write_lock_bh(&tunnel->hlist_lock);
	tunnel->acpt_newsess = false;	/* l2tp_session_register now fails with -ENODEV */
	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
again:
		hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
			session = hlist_entry(walk, struct l2tp_session, hlist);
			hlist_del_init(&session->hlist);

			/* l2tp_session_delete may sleep/take other locks, so
			 * call it with the hlist lock dropped.
			 */
			write_unlock_bh(&tunnel->hlist_lock);
			l2tp_session_delete(session);
			write_lock_bh(&tunnel->hlist_lock);

			/* Now restart from the beginning of this hash
			 * chain. We always remove a session from the
			 * list so we are guaranteed to make forward
			 * progress.
			 */
			goto again;
		}
	}
	write_unlock_bh(&tunnel->hlist_lock);
}
1220
1221
/* UDP encap_destroy hook: tear the tunnel down when the underlying
 * UDP socket is destroyed.
 */
static void l2tp_udp_encap_destroy(struct sock *sk)
{
	struct l2tp_tunnel *tunnel;

	tunnel = l2tp_sk_to_tunnel(sk);
	if (!tunnel)
		return;

	l2tp_tunnel_delete(tunnel);
}
1229
1230
/* Workqueue tunnel deletion function.  Runs from l2tp_wq so teardown
 * (which may sleep) happens outside the caller's context.
 */
static void l2tp_tunnel_del_work(struct work_struct *work)
{
	struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel,
						  del_work);
	struct sock *sk = tunnel->sock;
	struct socket *sock = sk->sk_socket;
	struct l2tp_net *pn;

	l2tp_tunnel_closeall(tunnel);

	/* If the tunnel socket was created within the kernel, use
	 * the sk API to release it here.  (A userspace-provided socket,
	 * fd >= 0, stays owned by userspace.)
	 */
	if (tunnel->fd < 0) {
		if (sock) {
			kernel_sock_shutdown(sock, SHUT_RDWR);
			sock_release(sock);
		}
	}

	/* Remove the tunnel struct from the tunnel list */
	pn = l2tp_pernet(tunnel->l2tp_net);
	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
	list_del_rcu(&tunnel->list);
	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);

	/* drop initial ref */
	l2tp_tunnel_dec_refcount(tunnel);

	/* drop workqueue ref (taken in l2tp_tunnel_delete) */
	l2tp_tunnel_dec_refcount(tunnel);
}
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
/* Create a kernel socket for the tunnel per @cfg: either a connected
 * UDP socket or a raw IPPROTO_L2TP socket (v4 or v6), bound with the
 * local tunnel id and connected to the peer.  On success *sockp holds
 * the new socket; on failure it is NULL and the error is returned.
 */
static int l2tp_tunnel_sock_create(struct net *net,
				   u32 tunnel_id,
				   u32 peer_tunnel_id,
				   struct l2tp_tunnel_cfg *cfg,
				   struct socket **sockp)
{
	int err = -EINVAL;
	struct socket *sock = NULL;
	struct udp_port_cfg udp_conf;

	switch (cfg->encap) {
	case L2TP_ENCAPTYPE_UDP:
		memset(&udp_conf, 0, sizeof(udp_conf));

#if IS_ENABLED(CONFIG_IPV6)
		if (cfg->local_ip6 && cfg->peer_ip6) {
			udp_conf.family = AF_INET6;
			memcpy(&udp_conf.local_ip6, cfg->local_ip6,
			       sizeof(udp_conf.local_ip6));
			memcpy(&udp_conf.peer_ip6, cfg->peer_ip6,
			       sizeof(udp_conf.peer_ip6));
			udp_conf.use_udp6_tx_checksums =
			    !cfg->udp6_zero_tx_checksums;
			udp_conf.use_udp6_rx_checksums =
			    !cfg->udp6_zero_rx_checksums;
		} else
#endif
		{
			udp_conf.family = AF_INET;
			udp_conf.local_ip = cfg->local_ip;
			udp_conf.peer_ip = cfg->peer_ip;
			udp_conf.use_udp_checksums = cfg->use_udp_checksums;
		}

		udp_conf.local_udp_port = htons(cfg->local_udp_port);
		udp_conf.peer_udp_port = htons(cfg->peer_udp_port);

		err = udp_sock_create(net, &udp_conf, &sock);
		if (err < 0)
			goto out;

		break;

	case L2TP_ENCAPTYPE_IP:
#if IS_ENABLED(CONFIG_IPV6)
		if (cfg->local_ip6 && cfg->peer_ip6) {
			struct sockaddr_l2tpip6 ip6_addr = {0};

			err = sock_create_kern(net, AF_INET6, SOCK_DGRAM,
					       IPPROTO_L2TP, &sock);
			if (err < 0)
				goto out;

			/* bind to the local address/tunnel id... */
			ip6_addr.l2tp_family = AF_INET6;
			memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
			       sizeof(ip6_addr.l2tp_addr));
			ip6_addr.l2tp_conn_id = tunnel_id;
			err = kernel_bind(sock, (struct sockaddr *)&ip6_addr,
					  sizeof(ip6_addr));
			if (err < 0)
				goto out;

			/* ...and connect to the peer address/tunnel id */
			ip6_addr.l2tp_family = AF_INET6;
			memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6,
			       sizeof(ip6_addr.l2tp_addr));
			ip6_addr.l2tp_conn_id = peer_tunnel_id;
			err = kernel_connect(sock,
					     (struct sockaddr *)&ip6_addr,
					     sizeof(ip6_addr), 0);
			if (err < 0)
				goto out;
		} else
#endif
		{
			struct sockaddr_l2tpip ip_addr = {0};

			err = sock_create_kern(net, AF_INET, SOCK_DGRAM,
					       IPPROTO_L2TP, &sock);
			if (err < 0)
				goto out;

			/* bind to the local address/tunnel id... */
			ip_addr.l2tp_family = AF_INET;
			ip_addr.l2tp_addr = cfg->local_ip;
			ip_addr.l2tp_conn_id = tunnel_id;
			err = kernel_bind(sock, (struct sockaddr *)&ip_addr,
					  sizeof(ip_addr));
			if (err < 0)
				goto out;

			/* ...and connect to the peer address/tunnel id */
			ip_addr.l2tp_family = AF_INET;
			ip_addr.l2tp_addr = cfg->peer_ip;
			ip_addr.l2tp_conn_id = peer_tunnel_id;
			err = kernel_connect(sock, (struct sockaddr *)&ip_addr,
					     sizeof(ip_addr), 0);
			if (err < 0)
				goto out;
		}
		break;

	default:
		goto out;
	}

out:
	*sockp = sock;
	if (err < 0 && sock) {
		kernel_sock_shutdown(sock, SHUT_RDWR);
		sock_release(sock);
		*sockp = NULL;
	}

	return err;
}
1386
1387static struct lock_class_key l2tp_socket_class;
1388
1389int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id,
1390 struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
1391{
1392 struct l2tp_tunnel *tunnel = NULL;
1393 int err;
1394 enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;
1395
1396 if (cfg)
1397 encap = cfg->encap;
1398
1399 tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
1400 if (!tunnel) {
1401 err = -ENOMEM;
1402 goto err;
1403 }
1404
1405 tunnel->version = version;
1406 tunnel->tunnel_id = tunnel_id;
1407 tunnel->peer_tunnel_id = peer_tunnel_id;
1408
1409 tunnel->magic = L2TP_TUNNEL_MAGIC;
1410 sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
1411 rwlock_init(&tunnel->hlist_lock);
1412 tunnel->acpt_newsess = true;
1413
1414 tunnel->encap = encap;
1415
1416 refcount_set(&tunnel->ref_count, 1);
1417 tunnel->fd = fd;
1418
1419
1420 INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);
1421
1422 INIT_LIST_HEAD(&tunnel->list);
1423
1424 err = 0;
1425err:
1426 if (tunnelp)
1427 *tunnelp = tunnel;
1428
1429 return err;
1430}
1431EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
1432
1433static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
1434 enum l2tp_encap_type encap)
1435{
1436 if (!net_eq(sock_net(sk), net))
1437 return -EINVAL;
1438
1439 if (sk->sk_type != SOCK_DGRAM)
1440 return -EPROTONOSUPPORT;
1441
1442 if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
1443 return -EPROTONOSUPPORT;
1444
1445 if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) ||
1446 (encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP))
1447 return -EPROTONOSUPPORT;
1448
1449 if (sk->sk_user_data)
1450 return -EBUSY;
1451
1452 return 0;
1453}
1454
/* Attach a created tunnel to its socket and publish it in the per-net
 * tunnel list.  For fd < 0 a kernel socket is created from @cfg; for
 * fd >= 0 the userspace socket is looked up and validated.  Fails
 * with -EEXIST if a tunnel with the same id is already registered.
 */
int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
			 struct l2tp_tunnel_cfg *cfg)
{
	struct l2tp_tunnel *tunnel_walk;
	struct l2tp_net *pn;
	struct socket *sock;
	struct sock *sk;
	int ret;

	if (tunnel->fd < 0) {
		ret = l2tp_tunnel_sock_create(net, tunnel->tunnel_id,
					      tunnel->peer_tunnel_id, cfg,
					      &sock);
		if (ret < 0)
			goto err;
	} else {
		sock = sockfd_lookup(tunnel->fd, &ret);
		if (!sock)
			goto err;

		ret = l2tp_validate_socket(sock->sk, net, tunnel->encap);
		if (ret < 0)
			goto err_sock;
	}

	tunnel->l2tp_net = net;
	pn = l2tp_pernet(net);

	/* The tunnel holds a socket reference until l2tp_tunnel_free */
	sk = sock->sk;
	sock_hold(sk);
	tunnel->sock = sk;

	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
	list_for_each_entry(tunnel_walk, &pn->l2tp_tunnel_list, list) {
		if (tunnel_walk->tunnel_id == tunnel->tunnel_id) {
			spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
			sock_put(sk);
			ret = -EEXIST;
			goto err_sock;
		}
	}
	list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);

	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
		struct udp_tunnel_sock_cfg udp_cfg = {
			.sk_user_data = tunnel,
			.encap_type = UDP_ENCAP_L2TPINUDP,
			.encap_rcv = l2tp_udp_encap_recv,
			.encap_destroy = l2tp_udp_encap_destroy,
		};

		setup_udp_tunnel_sock(net, sock, &udp_cfg);
	} else {
		sk->sk_user_data = tunnel;
	}

	/* Hook the socket destructor so teardown runs when the socket dies */
	tunnel->old_sk_destruct = sk->sk_destruct;
	sk->sk_destruct = &l2tp_tunnel_destruct;
	lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class,
				   "l2tp_sock");
	sk->sk_allocation = GFP_ATOMIC;	/* tx may run in softirq context */

	trace_register_tunnel(tunnel);

	if (tunnel->fd >= 0)
		sockfd_put(sock);

	return 0;

err_sock:
	if (tunnel->fd < 0)
		sock_release(sock);
	else
		sockfd_put(sock);
err:
	return ret;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_register);
1534
1535
1536
1537void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1538{
1539 if (!test_and_set_bit(0, &tunnel->dead)) {
1540 trace_delete_tunnel(tunnel);
1541 l2tp_tunnel_inc_refcount(tunnel);
1542 queue_work(l2tp_wq, &tunnel->del_work);
1543 }
1544}
1545EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
1546
1547void l2tp_session_delete(struct l2tp_session *session)
1548{
1549 if (test_and_set_bit(0, &session->dead))
1550 return;
1551
1552 trace_delete_session(session);
1553 l2tp_session_unhash(session);
1554 l2tp_session_queue_purge(session);
1555 if (session->session_close)
1556 (*session->session_close)(session);
1557
1558 l2tp_session_dec_refcount(session);
1559}
1560EXPORT_SYMBOL_GPL(l2tp_session_delete);
1561
1562
1563
1564
1565void l2tp_session_set_header_len(struct l2tp_session *session, int version)
1566{
1567 if (version == L2TP_HDR_VER_2) {
1568 session->hdr_len = 6;
1569 if (session->send_seq)
1570 session->hdr_len += 4;
1571 } else {
1572 session->hdr_len = 4 + session->cookie_len;
1573 session->hdr_len += l2tp_get_l2specific_len(session);
1574 if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
1575 session->hdr_len += 4;
1576 }
1577}
1578EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
1579
/* Allocate and initialise a session context, with @priv_size bytes of
 * trailing private data for the pseudowire implementation.
 *
 * The session is not yet hashed into the tunnel. One reference is held
 * for the caller. Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id,
					 u32 peer_session_id, struct l2tp_session_cfg *cfg)
{
	struct l2tp_session *session;

	session = kzalloc(sizeof(*session) + priv_size, GFP_KERNEL);
	if (session) {
		session->magic = L2TP_SESSION_MAGIC;
		session->tunnel = tunnel;

		session->session_id = session_id;
		session->peer_session_id = peer_session_id;
		session->nr = 0;
		/* Sequence numbers are 16-bit in v2, 24-bit in v3. */
		if (tunnel->version == L2TP_HDR_VER_2)
			session->nr_max = 0xffff;
		else
			session->nr_max = 0xffffff;
		session->nr_window_size = session->nr_max / 2;
		session->nr_oos_count_max = 4;

		/* NOTE(review): presumably consumed by the reorder logic in
		 * the receive path — confirm against l2tp_recv_common().
		 */
		session->reorder_skip = 1;

		sprintf(&session->name[0], "sess %u/%u",
			tunnel->tunnel_id, session->session_id);

		skb_queue_head_init(&session->reorder_q);

		INIT_HLIST_NODE(&session->hlist);
		INIT_HLIST_NODE(&session->global_hlist);

		if (cfg) {
			session->pwtype = cfg->pw_type;
			session->send_seq = cfg->send_seq;
			session->recv_seq = cfg->recv_seq;
			session->lns_mode = cfg->lns_mode;
			session->reorder_timeout = cfg->reorder_timeout;
			session->l2specific_type = cfg->l2specific_type;
			/* NOTE(review): cookie lengths are trusted here —
			 * assumed already bounded by the configuration path;
			 * verify against the netlink attribute validation.
			 */
			session->cookie_len = cfg->cookie_len;
			memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
			session->peer_cookie_len = cfg->peer_cookie_len;
			memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
		}

		/* hdr_len depends on version, cookies and encap set above. */
		l2tp_session_set_header_len(session, tunnel->version);

		/* One reference for the caller. */
		refcount_set(&session->ref_count, 1);

		return session;
	}

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(l2tp_session_create);
1634
1635
1636
1637
1638
1639static __net_init int l2tp_init_net(struct net *net)
1640{
1641 struct l2tp_net *pn = net_generic(net, l2tp_net_id);
1642 int hash;
1643
1644 INIT_LIST_HEAD(&pn->l2tp_tunnel_list);
1645 spin_lock_init(&pn->l2tp_tunnel_list_lock);
1646
1647 for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
1648 INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]);
1649
1650 spin_lock_init(&pn->l2tp_session_hlist_lock);
1651
1652 return 0;
1653}
1654
/* Per-netns teardown: delete every tunnel, wait for the deferred
 * teardown work and RCU callbacks to finish, then verify no sessions
 * were left behind.
 */
static __net_exit void l2tp_exit_net(struct net *net)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_tunnel *tunnel = NULL;
	int hash;

	/* Walk under RCU: l2tp_tunnel_delete() only sets the dead bit and
	 * queues work, so it is safe inside a read-side section.
	 */
	rcu_read_lock_bh();
	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
		l2tp_tunnel_delete(tunnel);
	}
	rcu_read_unlock_bh();

	/* Wait for queued tunnel teardown work to complete... */
	if (l2tp_wq)
		flush_workqueue(l2tp_wq);
	/* ...and for in-flight RCU callbacks before checking the hashes. */
	rcu_barrier();

	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
		WARN_ON_ONCE(!hlist_empty(&pn->l2tp_session_hlist[hash]));
}
1674
/* Registering with .size makes the pernet core allocate a zeroed
 * struct l2tp_net per namespace, reachable via net_generic(net, l2tp_net_id).
 */
static struct pernet_operations l2tp_net_ops = {
	.init = l2tp_init_net,
	.exit = l2tp_exit_net,
	.id   = &l2tp_net_id,
	.size = sizeof(struct l2tp_net),
};
1681
1682static int __init l2tp_init(void)
1683{
1684 int rc = 0;
1685
1686 rc = register_pernet_device(&l2tp_net_ops);
1687 if (rc)
1688 goto out;
1689
1690 l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0);
1691 if (!l2tp_wq) {
1692 pr_err("alloc_workqueue failed\n");
1693 unregister_pernet_device(&l2tp_net_ops);
1694 rc = -ENOMEM;
1695 goto out;
1696 }
1697
1698 pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);
1699
1700out:
1701 return rc;
1702}
1703
1704static void __exit l2tp_exit(void)
1705{
1706 unregister_pernet_device(&l2tp_net_ops);
1707 if (l2tp_wq) {
1708 destroy_workqueue(l2tp_wq);
1709 l2tp_wq = NULL;
1710 }
1711}
1712
module_init(l2tp_init);
module_exit(l2tp_exit);

/* Module metadata. */
MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("L2TP core");
MODULE_LICENSE("GPL");
MODULE_VERSION(L2TP_DRV_VERSION);
1720