1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19#include <linux/module.h>
20#include <linux/string.h>
21#include <linux/list.h>
22#include <linux/rculist.h>
23#include <linux/uaccess.h>
24
25#include <linux/kernel.h>
26#include <linux/spinlock.h>
27#include <linux/kthread.h>
28#include <linux/sched.h>
29#include <linux/slab.h>
30#include <linux/errno.h>
31#include <linux/jiffies.h>
32
33#include <linux/netdevice.h>
34#include <linux/net.h>
35#include <linux/inetdevice.h>
36#include <linux/skbuff.h>
37#include <linux/init.h>
38#include <linux/in.h>
39#include <linux/ip.h>
40#include <linux/udp.h>
41#include <linux/l2tp.h>
42#include <linux/hash.h>
43#include <linux/sort.h>
44#include <linux/file.h>
45#include <linux/nsproxy.h>
46#include <net/net_namespace.h>
47#include <net/netns/generic.h>
48#include <net/dst.h>
49#include <net/ip.h>
50#include <net/udp.h>
51#include <net/udp_tunnel.h>
52#include <net/inet_common.h>
53#include <net/xfrm.h>
54#include <net/protocol.h>
55#include <net/inet6_connection_sock.h>
56#include <net/inet_ecn.h>
57#include <net/ip6_route.h>
58#include <net/ip6_checksum.h>
59
60#include <asm/byteorder.h>
61#include <linux/atomic.h>
62
63#include "l2tp_core.h"
64#include "trace.h"
65
66#define CREATE_TRACE_POINTS
67#include "trace.h"
68
69#define L2TP_DRV_VERSION "V2.0"
70
71
72#define L2TP_HDRFLAG_T 0x8000
73#define L2TP_HDRFLAG_L 0x4000
74#define L2TP_HDRFLAG_S 0x0800
75#define L2TP_HDRFLAG_O 0x0200
76#define L2TP_HDRFLAG_P 0x0100
77
78#define L2TP_HDR_VER_MASK 0x000F
79#define L2TP_HDR_VER_2 0x0002
80#define L2TP_HDR_VER_3 0x0003
81
82
83#define L2TP_SLFLAG_S 0x40000000
84#define L2TP_SL_SEQ_MASK 0x00ffffff
85
86#define L2TP_HDR_SIZE_MAX 14
87
88
89#define L2TP_DEFAULT_DEBUG_FLAGS 0
90
91
92
/* Per-packet receive-path state, kept in the skb control buffer
 * just after the inet layer's own area (see L2TP_SKB_CB below).
 */
struct l2tp_skb_cb {
	u32 ns;			/* sequence number carried by this packet */
	u16 has_seq;		/* non-zero if ns is valid */
	u16 length;		/* payload length, used for stats accounting */
	unsigned long expires;	/* jiffies deadline while on the reorder queue */
};
99
100#define L2TP_SKB_CB(skb) ((struct l2tp_skb_cb *)&(skb)->cb[sizeof(struct inet_skb_parm)])
101
102static struct workqueue_struct *l2tp_wq;
103
104
105static unsigned int l2tp_net_id;
/* Per-network-namespace l2tp state, registered under l2tp_net_id. */
struct l2tp_net {
	/* all tunnels in this netns; traversed under RCU */
	struct list_head l2tp_tunnel_list;
	/* guards additions/removals on l2tp_tunnel_list */
	spinlock_t l2tp_tunnel_list_lock;
	/* namespace-wide session hash (L2TPv3 sessions only) */
	struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
	/* guards the global session hash */
	spinlock_t l2tp_session_hlist_lock;
};
114
115#if IS_ENABLED(CONFIG_IPV6)
116static bool l2tp_sk_is_v6(struct sock *sk)
117{
118 return sk->sk_family == PF_INET6 &&
119 !ipv6_addr_v4mapped(&sk->sk_v6_daddr);
120}
121#endif
122
123static inline struct l2tp_net *l2tp_pernet(const struct net *net)
124{
125 return net_generic(net, l2tp_net_id);
126}
127
128
129
130
131
132
/* Hash a session id into the per-namespace (global) session table.
 * Only L2TPv3 sessions live there, since their ids must be resolvable
 * without a tunnel id.
 */
static inline struct hlist_head *
l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
{
	return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)];
}
138
139
140
141
142
143
144
/* Hash a session id into its tunnel's private session table. */
static inline struct hlist_head *
l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
{
	return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
}
150
/* Last tunnel reference dropped: release our hold on the tunnel
 * socket.  The tunnel struct itself is freed from the socket's
 * destructor (l2tp_tunnel_destruct) via kfree_rcu.
 */
static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
{
	trace_free_tunnel(tunnel);
	sock_put(tunnel->sock);
}
157
/* Last session reference dropped: return the tunnel reference taken
 * at registration time and free the session memory.
 */
static void l2tp_session_free(struct l2tp_session *session)
{
	trace_free_session(session);
	if (session->tunnel)
		l2tp_tunnel_dec_refcount(session->tunnel);
	kfree(session);
}
165
166struct l2tp_tunnel *l2tp_sk_to_tunnel(struct sock *sk)
167{
168 struct l2tp_tunnel *tunnel = sk->sk_user_data;
169
170 if (tunnel)
171 if (WARN_ON(tunnel->magic != L2TP_TUNNEL_MAGIC))
172 return NULL;
173
174 return tunnel;
175}
176EXPORT_SYMBOL_GPL(l2tp_sk_to_tunnel);
177
/* Take an additional reference on a tunnel the caller already holds. */
void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel)
{
	refcount_inc(&tunnel->ref_count);
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_inc_refcount);
183
/* Drop a tunnel reference, tearing the tunnel down on the last one. */
void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel)
{
	if (refcount_dec_and_test(&tunnel->ref_count))
		l2tp_tunnel_free(tunnel);
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_dec_refcount);
190
/* Take an additional reference on a session the caller already holds. */
void l2tp_session_inc_refcount(struct l2tp_session *session)
{
	refcount_inc(&session->ref_count);
}
EXPORT_SYMBOL_GPL(l2tp_session_inc_refcount);
196
/* Drop a session reference, freeing the session on the last one. */
void l2tp_session_dec_refcount(struct l2tp_session *session)
{
	if (refcount_dec_and_test(&session->ref_count))
		l2tp_session_free(session);
}
EXPORT_SYMBOL_GPL(l2tp_session_dec_refcount);
203
204
/* Look up a tunnel by id in @net and take a reference on it.
 * Returns NULL if not found, or if the tunnel is already on its way
 * to being freed (refcount hit zero).  The caller must release the
 * reference with l2tp_tunnel_dec_refcount().
 */
struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
{
	const struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_tunnel *tunnel;

	rcu_read_lock_bh();
	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
		/* inc_not_zero guards against racing with teardown */
		if (tunnel->tunnel_id == tunnel_id &&
		    refcount_inc_not_zero(&tunnel->ref_count)) {
			rcu_read_unlock_bh();

			return tunnel;
		}
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
224
/* Return the nth (zero-based) live tunnel in @net with a reference
 * held, or NULL if there are fewer than nth+1 tunnels.  The caller
 * must release the reference with l2tp_tunnel_dec_refcount().
 */
struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
{
	const struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_tunnel *tunnel;
	int count = 0;

	rcu_read_lock_bh();
	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
		/* skip tunnels whose refcount already dropped to zero */
		if (++count > nth &&
		    refcount_inc_not_zero(&tunnel->ref_count)) {
			rcu_read_unlock_bh();
			return tunnel;
		}
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_get_nth);
244
/* Look up a session by id within one tunnel, taking a reference.
 * Returns NULL if the id is unknown to this tunnel.  The caller must
 * release the reference with l2tp_session_dec_refcount().
 */
struct l2tp_session *l2tp_tunnel_get_session(struct l2tp_tunnel *tunnel,
					     u32 session_id)
{
	struct hlist_head *session_list;
	struct l2tp_session *session;

	session_list = l2tp_session_id_hash(tunnel, session_id);

	read_lock_bh(&tunnel->hlist_lock);
	hlist_for_each_entry(session, session_list, hlist)
		if (session->session_id == session_id) {
			/* take the reference while still under the lock */
			l2tp_session_inc_refcount(session);
			read_unlock_bh(&tunnel->hlist_lock);

			return session;
		}
	read_unlock_bh(&tunnel->hlist_lock);

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_get_session);
266
/* Look up a session by id in the per-namespace global hash, taking a
 * reference.  Only sessions registered there (L2TPv3; see
 * l2tp_session_register) can be found.  The caller must release the
 * reference with l2tp_session_dec_refcount().
 */
struct l2tp_session *l2tp_session_get(const struct net *net, u32 session_id)
{
	struct hlist_head *session_list;
	struct l2tp_session *session;

	session_list = l2tp_session_id_hash_2(l2tp_pernet(net), session_id);

	rcu_read_lock_bh();
	hlist_for_each_entry_rcu(session, session_list, global_hlist)
		if (session->session_id == session_id) {
			l2tp_session_inc_refcount(session);
			rcu_read_unlock_bh();

			return session;
		}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_get);
287
/* Return the nth (zero-based) session of @tunnel with a reference
 * held, or NULL.  Iterates every hash bucket under the tunnel's
 * hlist read lock.  Caller releases via l2tp_session_dec_refcount().
 */
struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth)
{
	int hash;
	struct l2tp_session *session;
	int count = 0;

	read_lock_bh(&tunnel->hlist_lock);
	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
		hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
			if (++count > nth) {
				l2tp_session_inc_refcount(session);
				read_unlock_bh(&tunnel->hlist_lock);
				return session;
			}
		}
	}

	read_unlock_bh(&tunnel->hlist_lock);

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
310
311
312
313
/* Find a session by its interface name, scanning the whole global
 * (per-namespace) session hash.  Takes a reference on the match;
 * returns NULL if no session uses @ifname.  Caller releases via
 * l2tp_session_dec_refcount().
 */
struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
						const char *ifname)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	int hash;
	struct l2tp_session *session;

	rcu_read_lock_bh();
	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
		hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
			if (!strcmp(session->ifname, ifname)) {
				l2tp_session_inc_refcount(session);
				rcu_read_unlock_bh();

				return session;
			}
		}
	}

	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
338
/* Publish @session in @tunnel's session hash and, for L2TPv3, in the
 * per-namespace global hash.  Takes a tunnel reference on behalf of
 * the session (released in l2tp_session_free).
 *
 * Returns 0 on success, -ENODEV if the tunnel is shutting down
 * (acpt_newsess cleared by l2tp_tunnel_closeall), or -EEXIST on a
 * session-id collision.
 */
int l2tp_session_register(struct l2tp_session *session,
			  struct l2tp_tunnel *tunnel)
{
	struct l2tp_session *session_walk;
	struct hlist_head *g_head;
	struct hlist_head *head;
	struct l2tp_net *pn;
	int err;

	head = l2tp_session_id_hash(tunnel, session->session_id);

	write_lock_bh(&tunnel->hlist_lock);
	if (!tunnel->acpt_newsess) {
		/* Tunnel is closing: refuse new sessions. */
		err = -ENODEV;
		goto err_tlock;
	}

	/* Reject duplicate session ids within this tunnel. */
	hlist_for_each_entry(session_walk, head, hlist)
		if (session_walk->session_id == session->session_id) {
			err = -EEXIST;
			goto err_tlock;
		}

	if (tunnel->version == L2TP_HDR_VER_3) {
		pn = l2tp_pernet(tunnel->l2tp_net);
		g_head = l2tp_session_id_hash_2(pn, session->session_id);

		spin_lock_bh(&pn->l2tp_session_hlist_lock);

		/* IP-encapsulated tunnels demultiplex on session id alone,
		 * so a v3 session id may not collide namespace-wide when
		 * either of the two tunnels involved uses IP encap.
		 */
		hlist_for_each_entry(session_walk, g_head, global_hlist)
			if (session_walk->session_id == session->session_id &&
			    (session_walk->tunnel->encap == L2TP_ENCAPTYPE_IP ||
			     tunnel->encap == L2TP_ENCAPTYPE_IP)) {
				err = -EEXIST;
				goto err_tlock_pnlock;
			}

		/* The session now pins its tunnel. */
		l2tp_tunnel_inc_refcount(tunnel);
		hlist_add_head_rcu(&session->global_hlist, g_head);

		spin_unlock_bh(&pn->l2tp_session_hlist_lock);
	} else {
		l2tp_tunnel_inc_refcount(tunnel);
	}

	hlist_add_head(&session->hlist, head);
	write_unlock_bh(&tunnel->hlist_lock);

	trace_register_session(session);

	return 0;

err_tlock_pnlock:
	spin_unlock_bh(&pn->l2tp_session_hlist_lock);
err_tlock:
	write_unlock_bh(&tunnel->hlist_lock);

	return err;
}
EXPORT_SYMBOL_GPL(l2tp_session_register);
402
403
404
405
406
407
408
409
/* Insert @skb into the session's reorder queue, keeping the queue
 * ordered by ns.  An insertion anywhere but the tail is counted as
 * an out-of-sequence arrival.
 */
static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
{
	struct sk_buff *skbp;
	struct sk_buff *tmp;
	u32 ns = L2TP_SKB_CB(skb)->ns;

	spin_lock_bh(&session->reorder_q.lock);
	skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
		if (L2TP_SKB_CB(skbp)->ns > ns) {
			/* First queued packet with a higher ns: slot in
			 * ahead of it.
			 */
			__skb_queue_before(&session->reorder_q, skbp, skb);
			atomic_long_inc(&session->stats.rx_oos_packets);
			goto out;
		}
	}

	/* Highest ns seen so far: append. */
	__skb_queue_tail(&session->reorder_q, skb);

out:
	spin_unlock_bh(&session->reorder_q.lock);
}
430
431
432
/* Deliver one dequeued skb to the session: account it, advance the
 * expected receive sequence number, then invoke the session's
 * recv_skb hook (or drop the packet if the session has none).
 * Called with the reorder-queue lock released (see l2tp_recv_dequeue).
 */
static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	int length = L2TP_SKB_CB(skb)->length;

	/* Detach the skb from the tunnel socket's accounting before
	 * handing it up the stack.
	 */
	skb_orphan(skb);

	atomic_long_inc(&tunnel->stats.rx_packets);
	atomic_long_add(length, &tunnel->stats.rx_bytes);
	atomic_long_inc(&session->stats.rx_packets);
	atomic_long_add(length, &session->stats.rx_bytes);

	if (L2TP_SKB_CB(skb)->has_seq) {
		/* Bump the next-expected sequence number, wrapping at
		 * nr_max.
		 */
		session->nr++;
		session->nr &= session->nr_max;
		trace_session_seqnum_update(session);
	}

	/* Pass to the pseudowire's receive handler, if any. */
	if (session->recv_skb)
		(*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
	else
		kfree_skb(skb);
}
461
462
463
464
/* Drain the reorder queue of everything that is now deliverable.
 * Expired packets are dropped and trigger a sequence resync
 * (reorder_skip); delivery otherwise stops at the first gap in
 * sequence numbers.  The queue lock cannot be held across the
 * delivery callback, so after each delivered packet the walk
 * restarts from the head of the queue.
 */
static void l2tp_recv_dequeue(struct l2tp_session *session)
{
	struct sk_buff *skb;
	struct sk_buff *tmp;

start:
	spin_lock_bh(&session->reorder_q.lock);
	skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
		struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);

		/* Too old to keep waiting for missing predecessors. */
		if (time_after(jiffies, cb->expires)) {
			atomic_long_inc(&session->stats.rx_seq_discards);
			atomic_long_inc(&session->stats.rx_errors);
			trace_session_pkt_expired(session, cb->ns);
			session->reorder_skip = 1;
			__skb_unlink(skb, &session->reorder_q);
			kfree_skb(skb);
			continue;
		}

		if (cb->has_seq) {
			if (session->reorder_skip) {
				/* Resync: accept this packet's ns as the
				 * new expected sequence number.
				 */
				session->reorder_skip = 0;
				session->nr = cb->ns;
				trace_session_seqnum_reset(session);
			}
			if (cb->ns != session->nr)
				goto out;	/* gap: wait for the missing packet */
		}
		__skb_unlink(skb, &session->reorder_q);

		/* Deliver without the queue lock, then rescan from the
		 * head since the queue may have changed meanwhile.
		 */
		spin_unlock_bh(&session->reorder_q.lock);
		l2tp_recv_dequeue_skb(session, skb);
		goto start;
	}

out:
	spin_unlock_bh(&session->reorder_q.lock);
}
512
513static int l2tp_seq_check_rx_window(struct l2tp_session *session, u32 nr)
514{
515 u32 nws;
516
517 if (nr >= session->nr)
518 nws = nr - session->nr;
519 else
520 nws = (session->nr_max + 1) - (session->nr - nr);
521
522 return nws < session->nr_window_size;
523}
524
525
526
527
/* Handle a sequenced data packet: queue it for (possibly reordered)
 * delivery, or tell the caller to discard it.  Returns 0 if the skb
 * was queued, 1 if the caller must discard it.
 */
static int l2tp_recv_data_seq(struct l2tp_session *session, struct sk_buff *skb)
{
	struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);

	if (!l2tp_seq_check_rx_window(session, cb->ns)) {
		/* Too far outside the receive window to be a plausibly
		 * reordered packet.
		 */
		trace_session_pkt_outside_rx_window(session, cb->ns);
		goto discard;
	}

	if (session->reorder_timeout != 0) {
		/* Reordering enabled: insert in ns order and let
		 * l2tp_recv_dequeue() handle delivery and expiry.
		 */
		l2tp_recv_queue_skb(session, skb);
		goto out;
	}

	/* No reorder timeout: only in-sequence packets are accepted,
	 * but consecutive out-of-sequence arrivals are counted so we
	 * can resync (reorder_skip) if the peer has clearly moved on.
	 */
	if (cb->ns == session->nr) {
		skb_queue_tail(&session->reorder_q, skb);
	} else {
		u32 nr_oos = cb->ns;
		u32 nr_next = (session->nr_oos + 1) & session->nr_max;

		if (nr_oos == nr_next)
			session->nr_oos_count++;
		else
			session->nr_oos_count = 0;

		session->nr_oos = nr_oos;
		if (session->nr_oos_count > session->nr_oos_count_max) {
			/* Too many consecutive OOS packets: resync. */
			session->reorder_skip = 1;
		}
		if (!session->reorder_skip) {
			atomic_long_inc(&session->stats.rx_seq_discards);
			trace_session_pkt_oos(session, cb->ns);
			goto discard;
		}
		skb_queue_tail(&session->reorder_q, skb);
	}

out:
	return 0;

discard:
	return 1;
}
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
/* Receive-path core shared by the UDP and IP encapsulations.
 * Parses the remainder of the L2TP header at @ptr (@optr marks the
 * header start, @hdrflags holds the first two header bytes in host
 * order, @length is the payload length for stats), then queues the
 * payload for delivery, honouring sequence numbers and the reorder
 * queue.  Consumes @skb in all cases.
 */
void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
		      unsigned char *ptr, unsigned char *optr, u16 hdrflags,
		      int length)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	int offset;

	/* Verify and strip the peer's cookie, if one is configured. */
	if (session->peer_cookie_len > 0) {
		if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
			pr_warn_ratelimited("%s: cookie mismatch (%u/%u). Discarding.\n",
					    tunnel->name, tunnel->tunnel_id,
					    session->session_id);
			atomic_long_inc(&session->stats.rx_cookie_discards);
			goto discard;
		}
		ptr += session->peer_cookie_len;
	}

	/* Extract sequence numbers: v2 carries ns/nr in the header when
	 * the S bit is set; v3 uses the default L2-specific sublayer
	 * (S bit + 24-bit ns in a 32-bit word).
	 */
	L2TP_SKB_CB(skb)->has_seq = 0;
	if (tunnel->version == L2TP_HDR_VER_2) {
		if (hdrflags & L2TP_HDRFLAG_S) {
			/* ns field */
			L2TP_SKB_CB(skb)->ns = ntohs(*(__be16 *)ptr);
			L2TP_SKB_CB(skb)->has_seq = 1;
			ptr += 2;
			/* nr field: not used on receive */
			ptr += 2;
		}
	} else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
		u32 l2h = ntohl(*(__be32 *)ptr);

		if (l2h & 0x40000000) {
			/* S bit set: low 24 bits carry ns */
			L2TP_SKB_CB(skb)->ns = l2h & 0x00ffffff;
			L2TP_SKB_CB(skb)->has_seq = 1;
		}
		ptr += 4;
	}

	if (L2TP_SKB_CB(skb)->has_seq) {
		/* As LAC, follow the LNS's lead: if it started sending
		 * sequence numbers, start sending them too.
		 */
		if (!session->lns_mode && !session->send_seq) {
			trace_session_seqnum_lns_enable(session);
			session->send_seq = 1;
			l2tp_session_set_header_len(session, tunnel->version);
		}
	} else {
		/* No sequence numbers: discard if the user made them
		 * mandatory.
		 */
		if (session->recv_seq) {
			pr_warn_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
					    session->name);
			atomic_long_inc(&session->stats.rx_seq_discards);
			goto discard;
		}

		/* As LAC, the LNS stopping sequence numbers means we
		 * stop too.  As LNS with send_seq set, the peer is
		 * violating the agreed mode: discard.
		 */
		if (!session->lns_mode && session->send_seq) {
			trace_session_seqnum_lns_disable(session);
			session->send_seq = 0;
			l2tp_session_set_header_len(session, tunnel->version);
		} else if (session->send_seq) {
			pr_warn_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
					    session->name);
			atomic_long_inc(&session->stats.rx_seq_discards);
			goto discard;
		}
	}

	/* v2 only: skip the optional offset pad when the O bit is set.
	 * Any overrun is caught by the pskb_may_pull() below.
	 */
	if (tunnel->version == L2TP_HDR_VER_2) {
		if (hdrflags & L2TP_HDRFLAG_O) {
			offset = ntohs(*(__be16 *)ptr);
			ptr += 2 + offset;
		}
	}

	offset = ptr - optr;
	if (!pskb_may_pull(skb, offset))
		goto discard;

	__skb_pull(skb, offset);

	/* Record payload length and a reorder-queue deadline (defaults
	 * to one second when no reorder timeout is configured).
	 */
	L2TP_SKB_CB(skb)->length = length;
	L2TP_SKB_CB(skb)->expires = jiffies +
		(session->reorder_timeout ? session->reorder_timeout : HZ);

	/* Sequenced packets may need reordering; unsequenced ones go
	 * straight on the queue.
	 */
	if (L2TP_SKB_CB(skb)->has_seq) {
		if (l2tp_recv_data_seq(session, skb))
			goto discard;
	} else {
		skb_queue_tail(&session->reorder_q, skb);
	}

	/* Deliver whatever is now in order. */
	l2tp_recv_dequeue(session);

	return;

discard:
	atomic_long_inc(&session->stats.rx_errors);
	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(l2tp_recv_common);
777
778
779
780static void l2tp_session_queue_purge(struct l2tp_session *session)
781{
782 struct sk_buff *skb = NULL;
783
784 while ((skb = skb_dequeue(&session->reorder_q))) {
785 atomic_long_inc(&session->stats.rx_errors);
786 kfree_skb(skb);
787 }
788}
789
790
791
792
793
794
795
796static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
797{
798 struct l2tp_session *session = NULL;
799 unsigned char *ptr, *optr;
800 u16 hdrflags;
801 u32 tunnel_id, session_id;
802 u16 version;
803 int length;
804
805
806
807
808 __skb_pull(skb, sizeof(struct udphdr));
809
810
811 if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
812 pr_warn_ratelimited("%s: recv short packet (len=%d)\n",
813 tunnel->name, skb->len);
814 goto error;
815 }
816
817
818 optr = skb->data;
819 ptr = skb->data;
820
821
822 hdrflags = ntohs(*(__be16 *)ptr);
823
824
825 version = hdrflags & L2TP_HDR_VER_MASK;
826 if (version != tunnel->version) {
827 pr_warn_ratelimited("%s: recv protocol version mismatch: got %d expected %d\n",
828 tunnel->name, version, tunnel->version);
829 goto error;
830 }
831
832
833 length = skb->len;
834
835
836 if (hdrflags & L2TP_HDRFLAG_T)
837 goto error;
838
839
840 ptr += 2;
841
842 if (tunnel->version == L2TP_HDR_VER_2) {
843
844 if (hdrflags & L2TP_HDRFLAG_L)
845 ptr += 2;
846
847
848 tunnel_id = ntohs(*(__be16 *)ptr);
849 ptr += 2;
850 session_id = ntohs(*(__be16 *)ptr);
851 ptr += 2;
852 } else {
853 ptr += 2;
854 tunnel_id = tunnel->tunnel_id;
855 session_id = ntohl(*(__be32 *)ptr);
856 ptr += 4;
857 }
858
859
860 session = l2tp_tunnel_get_session(tunnel, session_id);
861 if (!session || !session->recv_skb) {
862 if (session)
863 l2tp_session_dec_refcount(session);
864
865
866 pr_warn_ratelimited("%s: no session found (%u/%u). Passing up.\n",
867 tunnel->name, tunnel_id, session_id);
868 goto error;
869 }
870
871 if (tunnel->version == L2TP_HDR_VER_3 &&
872 l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
873 goto error;
874
875 l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
876 l2tp_session_dec_refcount(session);
877
878 return 0;
879
880error:
881
882 __skb_push(skb, sizeof(struct udphdr));
883
884 return 1;
885}
886
887
888
889
890
891
892
/* UDP encap_rcv hook installed on tunnel sockets.  Returns 0 if the
 * packet was consumed by l2tp, or 1 to let the UDP stack process it
 * normally (no tunnel attached, bad magic, or not an l2tp data
 * packet).
 */
int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct l2tp_tunnel *tunnel;

	/* Fetch the tunnel via the RCU accessor and sanity-check the
	 * magic value before trusting the pointer.
	 */
	tunnel = rcu_dereference_sk_user_data(sk);
	if (!tunnel)
		goto pass_up;
	if (WARN_ON(tunnel->magic != L2TP_TUNNEL_MAGIC))
		goto pass_up;

	if (l2tp_udp_recv_core(tunnel, skb))
		goto pass_up;

	return 0;

pass_up:
	return 1;
}
EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);
918
919
920
921
922
923
924
/* Write an L2TPv2 data header into @buf for @session, advancing the
 * session's send sequence number when sequencing is enabled.
 * Returns the number of bytes written.
 */
static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	__be16 *bufp = buf;
	__be16 *optr = buf;
	u16 flags = L2TP_HDR_VER_2;
	u32 tunnel_id = tunnel->peer_tunnel_id;
	u32 session_id = session->peer_session_id;

	if (session->send_seq)
		flags |= L2TP_HDRFLAG_S;

	/* flags/version, then the peer's tunnel and session ids. */
	*bufp++ = htons(flags);
	*bufp++ = htons(tunnel_id);
	*bufp++ = htons(session_id);
	if (session->send_seq) {
		/* ns, then nr (sent as zero). */
		*bufp++ = htons(session->ns);
		*bufp++ = 0;
		session->ns++;
		session->ns &= 0xffff;	/* v2 sequence numbers are 16-bit */
		trace_session_seqnum_update(session);
	}

	return bufp - optr;
}
951
/* Write an L2TPv3 data header into @buf: for UDP encapsulation a
 * flags/version word plus reserved field, then (for both encaps) the
 * peer session id, the configured cookie, and — if the default
 * L2-specific sublayer is in use — a 32-bit word carrying the S bit
 * and 24-bit ns.  Returns the number of bytes written.
 */
static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	char *bufp = buf;
	char *optr = bufp;

	/* The UDP encap header includes flags/version and a reserved
	 * field; for IP encap these are carried elsewhere.
	 */
	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
		u16 flags = L2TP_HDR_VER_3;
		*((__be16 *)bufp) = htons(flags);
		bufp += 2;
		*((__be16 *)bufp) = 0;
		bufp += 2;
	}

	*((__be32 *)bufp) = htonl(session->peer_session_id);
	bufp += 4;
	if (session->cookie_len) {
		memcpy(bufp, &session->cookie[0], session->cookie_len);
		bufp += session->cookie_len;
	}
	if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
		u32 l2h = 0;

		if (session->send_seq) {
			/* S bit + 24-bit ns */
			l2h = 0x40000000 | session->ns;
			session->ns++;
			session->ns &= 0xffffff;
			trace_session_seqnum_update(session);
		}

		*((__be32 *)bufp) = htonl(l2h);
		bufp += 4;
	}

	return bufp - optr;
}
991
992
/* Hand a fully-built packet to the tunnel socket's IP output path.
 * Returns NET_XMIT_SUCCESS or NET_XMIT_DROP.
 */
static int l2tp_xmit_queue(struct l2tp_tunnel *tunnel, struct sk_buff *skb, struct flowi *fl)
{
	int err;

	/* Allow local fragmentation regardless of any DF setting. */
	skb->ignore_df = 1;
	skb_dst_drop(skb);
#if IS_ENABLED(CONFIG_IPV6)
	if (l2tp_sk_is_v6(tunnel->sock))
		err = inet6_csk_xmit(tunnel->sock, skb, NULL);
	else
#endif
		err = ip_queue_xmit(tunnel->sock, skb, fl);

	return err >= 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}
1008
/* Push the L2TP (and, for UDP encap, UDP) headers onto @skb and
 * transmit it on the session's tunnel socket.  *len receives the
 * final frame length used for stats.  Returns a NET_XMIT_* code and
 * consumes @skb in all cases.
 */
static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, unsigned int *len)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	unsigned int data_len = skb->len;
	struct sock *sk = tunnel->sock;
	int headroom, uhlen, udp_len;
	int ret = NET_XMIT_SUCCESS;
	struct inet_sock *inet;
	struct udphdr *uh;

	/* Make sure there is writable headroom for IP + (UDP) + L2TP
	 * headers before we start pushing.
	 */
	uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(*uh) : 0;
	headroom = NET_SKB_PAD + sizeof(struct iphdr) + uhlen + session->hdr_len;
	if (skb_cow_head(skb, headroom)) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	/* Build the L2TP header in front of the payload. */
	if (tunnel->version == L2TP_HDR_VER_2)
		l2tp_build_l2tpv2_header(session, __skb_push(skb, session->hdr_len));
	else
		l2tp_build_l2tpv3_header(session, __skb_push(skb, session->hdr_len));

	/* Reset IP-options, xfrm and conntrack state before re-injecting
	 * the packet into the IP layer.
	 */
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
	nf_reset_ct(skb);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Socket is busy in process context: drop. */
		kfree_skb(skb);
		ret = NET_XMIT_DROP;
		goto out_unlock;
	}

	/* A userspace-managed tunnel socket that is not connected
	 * cannot transmit.
	 */
	if (tunnel->fd >= 0 && sk->sk_state != TCP_ESTABLISHED) {
		kfree_skb(skb);
		ret = NET_XMIT_DROP;
		goto out_unlock;
	}

	/* Report the full (header-inclusive) length for stats. */
	*len = skb->len;

	inet = inet_sk(sk);
	switch (tunnel->encap) {
	case L2TP_ENCAPTYPE_UDP:
		/* Build the UDP header and checksum. */
		__skb_push(skb, sizeof(*uh));
		skb_reset_transport_header(skb);
		uh = udp_hdr(skb);
		uh->source = inet->inet_sport;
		uh->dest = inet->inet_dport;
		udp_len = uhlen + session->hdr_len + data_len;
		uh->len = htons(udp_len);

		/* Checksum per socket configuration (may be disabled). */
#if IS_ENABLED(CONFIG_IPV6)
		if (l2tp_sk_is_v6(sk))
			udp6_set_csum(udp_get_no_check6_tx(sk),
				      skb, &inet6_sk(sk)->saddr,
				      &sk->sk_v6_daddr, udp_len);
		else
#endif
			udp_set_csum(sk->sk_no_check_tx, skb, inet->inet_saddr,
				     inet->inet_daddr, udp_len);
		break;

	case L2TP_ENCAPTYPE_IP:
		break;
	}

	ret = l2tp_xmit_queue(tunnel, skb, &inet->cork.fl);

out_unlock:
	bh_unlock_sock(sk);

	return ret;
}
1097
1098
1099
1100
1101int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb)
1102{
1103 unsigned int len = 0;
1104 int ret;
1105
1106 ret = l2tp_xmit_core(session, skb, &len);
1107 if (ret == NET_XMIT_SUCCESS) {
1108 atomic_long_inc(&session->tunnel->stats.tx_packets);
1109 atomic_long_add(len, &session->tunnel->stats.tx_bytes);
1110 atomic_long_inc(&session->stats.tx_packets);
1111 atomic_long_add(len, &session->stats.tx_bytes);
1112 } else {
1113 atomic_long_inc(&session->tunnel->stats.tx_errors);
1114 atomic_long_inc(&session->stats.tx_errors);
1115 }
1116 return ret;
1117}
1118EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
1119
1120
1121
1122
1123
1124
1125
1126
1127
/* sk_destruct handler installed on tunnel sockets.  Detaches the
 * tunnel from the socket, restores and runs the original destructor,
 * then frees the tunnel struct after an RCU grace period.
 */
static void l2tp_tunnel_destruct(struct sock *sk)
{
	struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);

	if (!tunnel)
		goto end;

	/* Unhook the encap callbacks so no more packets reach us. */
	switch (tunnel->encap) {
	case L2TP_ENCAPTYPE_UDP:
		(udp_sk(sk))->encap_type = 0;
		(udp_sk(sk))->encap_rcv = NULL;
		(udp_sk(sk))->encap_destroy = NULL;
		break;
	case L2TP_ENCAPTYPE_IP:
		break;
	}

	/* Restore the socket's original destructor and detach the
	 * tunnel pointer.
	 */
	sk->sk_destruct = tunnel->old_sk_destruct;
	sk->sk_user_data = NULL;

	/* Run the saved destructor, if the socket had one. */
	if (sk->sk_destruct)
		(*sk->sk_destruct)(sk);

	/* RCU readers (e.g. l2tp_udp_encap_recv) may still hold the
	 * tunnel pointer; free after a grace period.
	 */
	kfree_rcu(tunnel, rcu);
end:
	return;
}
1159
1160
/* Remove a session from its tunnel's hash and, for L2TPv3, from the
 * per-namespace global hash, waiting for RCU readers of the global
 * list to finish before returning.
 */
static void l2tp_session_unhash(struct l2tp_session *session)
{
	struct l2tp_tunnel *tunnel = session->tunnel;

	if (tunnel) {
		/* Unlink from the tunnel's private hash. */
		write_lock_bh(&tunnel->hlist_lock);
		hlist_del_init(&session->hlist);
		write_unlock_bh(&tunnel->hlist_lock);

		/* Non-v2 sessions also sit on the global hash. */
		if (tunnel->version != L2TP_HDR_VER_2) {
			struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);

			spin_lock_bh(&pn->l2tp_session_hlist_lock);
			hlist_del_init_rcu(&session->global_hlist);
			spin_unlock_bh(&pn->l2tp_session_hlist_lock);
			synchronize_rcu();
		}
	}
}
1183
1184
1185
/* Close every session on the tunnel and refuse new registrations.
 * The hlist lock cannot be held across l2tp_session_delete(), so it
 * is dropped for each deletion and the bucket walk restarted from
 * the head, since the list may have changed meanwhile.
 */
static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
{
	int hash;
	struct hlist_node *walk;
	struct hlist_node *tmp;
	struct l2tp_session *session;

	write_lock_bh(&tunnel->hlist_lock);
	tunnel->acpt_newsess = false;	/* block l2tp_session_register() */
	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
again:
		hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
			session = hlist_entry(walk, struct l2tp_session, hlist);
			hlist_del_init(&session->hlist);

			/* Delete without the lock, then rescan the
			 * bucket from its head.
			 */
			write_unlock_bh(&tunnel->hlist_lock);
			l2tp_session_delete(session);
			write_lock_bh(&tunnel->hlist_lock);

			goto again;
		}
	}
	write_unlock_bh(&tunnel->hlist_lock);
}
1215
1216
/* UDP encap_destroy hook: tear the tunnel down when its socket is
 * destroyed underneath us.
 */
static void l2tp_udp_encap_destroy(struct sock *sk)
{
	struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);

	if (!tunnel)
		return;

	l2tp_tunnel_delete(tunnel);
}
1224
1225
/* Workqueue handler performing deferred tunnel teardown: close all
 * sessions, shut down kernel-created sockets, unlink the tunnel from
 * the per-namespace list, and drop both the reference taken by
 * l2tp_tunnel_delete() and the initial creation reference.
 */
static void l2tp_tunnel_del_work(struct work_struct *work)
{
	struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel,
						  del_work);
	struct sock *sk = tunnel->sock;
	struct socket *sock = sk->sk_socket;
	struct l2tp_net *pn;

	l2tp_tunnel_closeall(tunnel);

	/* Only sockets we created ourselves (unmanaged tunnels,
	 * fd < 0) are released here; userspace-owned fds are left to
	 * their owner.
	 */
	if (tunnel->fd < 0) {
		if (sock) {
			kernel_sock_shutdown(sock, SHUT_RDWR);
			sock_release(sock);
		}
	}

	/* Remove from the namespace tunnel list. */
	pn = l2tp_pernet(tunnel->l2tp_net);
	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
	list_del_rcu(&tunnel->list);
	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);

	/* Drop the reference taken in l2tp_tunnel_delete(). */
	l2tp_tunnel_dec_refcount(tunnel);

	/* Drop the initial reference from l2tp_tunnel_create(). */
	l2tp_tunnel_dec_refcount(tunnel);
}
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
/* Create a kernel socket for an unmanaged tunnel according to @cfg:
 * a connected UDP socket for UDP encap, or a bound+connected
 * IPPROTO_L2TP raw-style socket for IP encap (IPv4 or IPv6).
 * On success *sockp holds the new socket and 0 is returned; on
 * failure the partially-created socket is released, *sockp is NULL
 * and a -ve errno is returned.
 */
static int l2tp_tunnel_sock_create(struct net *net,
				   u32 tunnel_id,
				   u32 peer_tunnel_id,
				   struct l2tp_tunnel_cfg *cfg,
				   struct socket **sockp)
{
	int err = -EINVAL;
	struct socket *sock = NULL;
	struct udp_port_cfg udp_conf;

	switch (cfg->encap) {
	case L2TP_ENCAPTYPE_UDP:
		memset(&udp_conf, 0, sizeof(udp_conf));

#if IS_ENABLED(CONFIG_IPV6)
		if (cfg->local_ip6 && cfg->peer_ip6) {
			udp_conf.family = AF_INET6;
			memcpy(&udp_conf.local_ip6, cfg->local_ip6,
			       sizeof(udp_conf.local_ip6));
			memcpy(&udp_conf.peer_ip6, cfg->peer_ip6,
			       sizeof(udp_conf.peer_ip6));
			/* zero-checksum options are opt-in via cfg */
			udp_conf.use_udp6_tx_checksums =
			    !cfg->udp6_zero_tx_checksums;
			udp_conf.use_udp6_rx_checksums =
			    !cfg->udp6_zero_rx_checksums;
		} else
#endif
		{
			udp_conf.family = AF_INET;
			udp_conf.local_ip = cfg->local_ip;
			udp_conf.peer_ip = cfg->peer_ip;
			udp_conf.use_udp_checksums = cfg->use_udp_checksums;
		}

		udp_conf.local_udp_port = htons(cfg->local_udp_port);
		udp_conf.peer_udp_port = htons(cfg->peer_udp_port);

		err = udp_sock_create(net, &udp_conf, &sock);
		if (err < 0)
			goto out;

		break;

	case L2TP_ENCAPTYPE_IP:
#if IS_ENABLED(CONFIG_IPV6)
		if (cfg->local_ip6 && cfg->peer_ip6) {
			struct sockaddr_l2tpip6 ip6_addr = {0};

			err = sock_create_kern(net, AF_INET6, SOCK_DGRAM,
					       IPPROTO_L2TP, &sock);
			if (err < 0)
				goto out;

			/* Bind the local address/connection id ... */
			ip6_addr.l2tp_family = AF_INET6;
			memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
			       sizeof(ip6_addr.l2tp_addr));
			ip6_addr.l2tp_conn_id = tunnel_id;
			err = kernel_bind(sock, (struct sockaddr *)&ip6_addr,
					  sizeof(ip6_addr));
			if (err < 0)
				goto out;

			/* ... then connect to the peer's. */
			ip6_addr.l2tp_family = AF_INET6;
			memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6,
			       sizeof(ip6_addr.l2tp_addr));
			ip6_addr.l2tp_conn_id = peer_tunnel_id;
			err = kernel_connect(sock,
					     (struct sockaddr *)&ip6_addr,
					     sizeof(ip6_addr), 0);
			if (err < 0)
				goto out;
		} else
#endif
		{
			struct sockaddr_l2tpip ip_addr = {0};

			err = sock_create_kern(net, AF_INET, SOCK_DGRAM,
					       IPPROTO_L2TP, &sock);
			if (err < 0)
				goto out;

			/* Bind the local address/connection id ... */
			ip_addr.l2tp_family = AF_INET;
			ip_addr.l2tp_addr = cfg->local_ip;
			ip_addr.l2tp_conn_id = tunnel_id;
			err = kernel_bind(sock, (struct sockaddr *)&ip_addr,
					  sizeof(ip_addr));
			if (err < 0)
				goto out;

			/* ... then connect to the peer's. */
			ip_addr.l2tp_family = AF_INET;
			ip_addr.l2tp_addr = cfg->peer_ip;
			ip_addr.l2tp_conn_id = peer_tunnel_id;
			err = kernel_connect(sock, (struct sockaddr *)&ip_addr,
					     sizeof(ip_addr), 0);
			if (err < 0)
				goto out;
		}
		break;

	default:
		goto out;
	}

out:
	*sockp = sock;
	if (err < 0 && sock) {
		/* Clean up a half-constructed socket on any failure. */
		kernel_sock_shutdown(sock, SHUT_RDWR);
		sock_release(sock);
		*sockp = NULL;
	}

	return err;
}
1381
1382static struct lock_class_key l2tp_socket_class;
1383
1384int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id,
1385 struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
1386{
1387 struct l2tp_tunnel *tunnel = NULL;
1388 int err;
1389 enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;
1390
1391 if (cfg)
1392 encap = cfg->encap;
1393
1394 tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
1395 if (!tunnel) {
1396 err = -ENOMEM;
1397 goto err;
1398 }
1399
1400 tunnel->version = version;
1401 tunnel->tunnel_id = tunnel_id;
1402 tunnel->peer_tunnel_id = peer_tunnel_id;
1403
1404 tunnel->magic = L2TP_TUNNEL_MAGIC;
1405 sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
1406 rwlock_init(&tunnel->hlist_lock);
1407 tunnel->acpt_newsess = true;
1408
1409 tunnel->encap = encap;
1410
1411 refcount_set(&tunnel->ref_count, 1);
1412 tunnel->fd = fd;
1413
1414
1415 INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);
1416
1417 INIT_LIST_HEAD(&tunnel->list);
1418
1419 err = 0;
1420err:
1421 if (tunnelp)
1422 *tunnelp = tunnel;
1423
1424 return err;
1425}
1426EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
1427
/* Sanity-check a userspace-supplied socket before adopting it as a
 * tunnel socket.  Returns 0 if usable, otherwise:
 *  -EINVAL          socket belongs to a different namespace
 *  -EPROTONOSUPPORT wrong type, family, or protocol for @encap
 *  -EBUSY           socket already has sk_user_data attached
 */
static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
				enum l2tp_encap_type encap)
{
	if (!net_eq(sock_net(sk), net))
		return -EINVAL;

	if (sk->sk_type != SOCK_DGRAM)
		return -EPROTONOSUPPORT;

	if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
		return -EPROTONOSUPPORT;

	/* Protocol must match the requested encapsulation. */
	if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) ||
	    (encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP))
		return -EPROTONOSUPPORT;

	if (sk->sk_user_data)
		return -EBUSY;

	return 0;
}
1449
/* Attach a socket to @tunnel and publish the tunnel in @net.  For
 * unmanaged tunnels (fd < 0) a kernel socket is created from @cfg;
 * otherwise the caller's fd is looked up and validated.  Fails with
 * -EEXIST if a tunnel with the same id is already registered.  On
 * success the socket's encap hooks and destructor are redirected to
 * l2tp and a reference is held on the socket.
 */
int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
			 struct l2tp_tunnel_cfg *cfg)
{
	struct l2tp_tunnel *tunnel_walk;
	struct l2tp_net *pn;
	struct socket *sock;
	struct sock *sk;
	int ret;

	if (tunnel->fd < 0) {
		/* Unmanaged tunnel: build our own kernel socket. */
		ret = l2tp_tunnel_sock_create(net, tunnel->tunnel_id,
					      tunnel->peer_tunnel_id, cfg,
					      &sock);
		if (ret < 0)
			goto err;
	} else {
		/* Managed tunnel: adopt the caller's socket. */
		sock = sockfd_lookup(tunnel->fd, &ret);
		if (!sock)
			goto err;

		ret = l2tp_validate_socket(sock->sk, net, tunnel->encap);
		if (ret < 0)
			goto err_sock;
	}

	tunnel->l2tp_net = net;
	pn = l2tp_pernet(net);

	/* Reject duplicate tunnel ids, then publish on the list.
	 * NOTE(review): the tunnel becomes visible on the list before
	 * tunnel->sock and the encap hooks below are set up — confirm
	 * that concurrent lookups tolerate a partially-initialised
	 * tunnel.
	 */
	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
	list_for_each_entry(tunnel_walk, &pn->l2tp_tunnel_list, list) {
		if (tunnel_walk->tunnel_id == tunnel->tunnel_id) {
			spin_unlock_bh(&pn->l2tp_tunnel_list_lock);

			ret = -EEXIST;
			goto err_sock;
		}
	}
	list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);

	sk = sock->sk;
	sock_hold(sk);	/* released by l2tp_tunnel_free() */
	tunnel->sock = sk;

	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
		/* Route UDP receive/destroy through our encap hooks. */
		struct udp_tunnel_sock_cfg udp_cfg = {
			.sk_user_data = tunnel,
			.encap_type = UDP_ENCAP_L2TPINUDP,
			.encap_rcv = l2tp_udp_encap_recv,
			.encap_destroy = l2tp_udp_encap_destroy,
		};

		setup_udp_tunnel_sock(net, sock, &udp_cfg);
	} else {
		sk->sk_user_data = tunnel;
	}

	/* Take over the destructor, saving the original for later. */
	tunnel->old_sk_destruct = sk->sk_destruct;
	sk->sk_destruct = &l2tp_tunnel_destruct;
	lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class,
				   "l2tp_sock");
	sk->sk_allocation = GFP_ATOMIC;

	trace_register_tunnel(tunnel);

	if (tunnel->fd >= 0)
		sockfd_put(sock);

	return 0;

err_sock:
	if (tunnel->fd < 0)
		sock_release(sock);
	else
		sockfd_put(sock);
err:
	return ret;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_register);
1529
1530
1531
1532void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1533{
1534 if (!test_and_set_bit(0, &tunnel->dead)) {
1535 trace_delete_tunnel(tunnel);
1536 l2tp_tunnel_inc_refcount(tunnel);
1537 queue_work(l2tp_wq, &tunnel->del_work);
1538 }
1539}
1540EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
1541
1542void l2tp_session_delete(struct l2tp_session *session)
1543{
1544 if (test_and_set_bit(0, &session->dead))
1545 return;
1546
1547 trace_delete_session(session);
1548 l2tp_session_unhash(session);
1549 l2tp_session_queue_purge(session);
1550 if (session->session_close)
1551 (*session->session_close)(session);
1552
1553 l2tp_session_dec_refcount(session);
1554}
1555EXPORT_SYMBOL_GPL(l2tp_session_delete);
1556
1557
1558
1559
1560void l2tp_session_set_header_len(struct l2tp_session *session, int version)
1561{
1562 if (version == L2TP_HDR_VER_2) {
1563 session->hdr_len = 6;
1564 if (session->send_seq)
1565 session->hdr_len += 4;
1566 } else {
1567 session->hdr_len = 4 + session->cookie_len;
1568 session->hdr_len += l2tp_get_l2specific_len(session);
1569 if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
1570 session->hdr_len += 4;
1571 }
1572}
1573EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
1574
1575struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id,
1576 u32 peer_session_id, struct l2tp_session_cfg *cfg)
1577{
1578 struct l2tp_session *session;
1579
1580 session = kzalloc(sizeof(*session) + priv_size, GFP_KERNEL);
1581 if (session) {
1582 session->magic = L2TP_SESSION_MAGIC;
1583 session->tunnel = tunnel;
1584
1585 session->session_id = session_id;
1586 session->peer_session_id = peer_session_id;
1587 session->nr = 0;
1588 if (tunnel->version == L2TP_HDR_VER_2)
1589 session->nr_max = 0xffff;
1590 else
1591 session->nr_max = 0xffffff;
1592 session->nr_window_size = session->nr_max / 2;
1593 session->nr_oos_count_max = 4;
1594
1595
1596 session->reorder_skip = 1;
1597
1598 sprintf(&session->name[0], "sess %u/%u",
1599 tunnel->tunnel_id, session->session_id);
1600
1601 skb_queue_head_init(&session->reorder_q);
1602
1603 INIT_HLIST_NODE(&session->hlist);
1604 INIT_HLIST_NODE(&session->global_hlist);
1605
1606 if (cfg) {
1607 session->pwtype = cfg->pw_type;
1608 session->send_seq = cfg->send_seq;
1609 session->recv_seq = cfg->recv_seq;
1610 session->lns_mode = cfg->lns_mode;
1611 session->reorder_timeout = cfg->reorder_timeout;
1612 session->l2specific_type = cfg->l2specific_type;
1613 session->cookie_len = cfg->cookie_len;
1614 memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
1615 session->peer_cookie_len = cfg->peer_cookie_len;
1616 memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
1617 }
1618
1619 l2tp_session_set_header_len(session, tunnel->version);
1620
1621 refcount_set(&session->ref_count, 1);
1622
1623 return session;
1624 }
1625
1626 return ERR_PTR(-ENOMEM);
1627}
1628EXPORT_SYMBOL_GPL(l2tp_session_create);
1629
1630
1631
1632
1633
1634static __net_init int l2tp_init_net(struct net *net)
1635{
1636 struct l2tp_net *pn = net_generic(net, l2tp_net_id);
1637 int hash;
1638
1639 INIT_LIST_HEAD(&pn->l2tp_tunnel_list);
1640 spin_lock_init(&pn->l2tp_tunnel_list_lock);
1641
1642 for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
1643 INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]);
1644
1645 spin_lock_init(&pn->l2tp_session_hlist_lock);
1646
1647 return 0;
1648}
1649
/* Per-namespace teardown: delete every remaining tunnel and wait for the
 * asynchronous cleanup to finish before the namespace is destroyed.
 */
static __net_exit void l2tp_exit_net(struct net *net)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_tunnel *tunnel = NULL;
	int hash;

	/* Walk the tunnel list under RCU; l2tp_tunnel_delete() only sets
	 * the dead flag and queues work, so nothing here blocks.
	 */
	rcu_read_lock_bh();
	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
		l2tp_tunnel_delete(tunnel);
	}
	rcu_read_unlock_bh();

	/* Wait for all queued tunnel teardown work to complete... */
	if (l2tp_wq)
		flush_workqueue(l2tp_wq);
	/* ...and for any in-flight RCU callbacks to finish. */
	rcu_barrier();

	/* Every session should have been removed by the teardown above. */
	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
		WARN_ON_ONCE(!hlist_empty(&pn->l2tp_session_hlist[hash]));
}
1669
/* Pernet hooks: .size makes the core allocate a zeroed struct l2tp_net
 * per namespace, retrievable via net_generic(net, l2tp_net_id).
 */
static struct pernet_operations l2tp_net_ops = {
	.init = l2tp_init_net,
	.exit = l2tp_exit_net,
	.id = &l2tp_net_id,
	.size = sizeof(struct l2tp_net),
};
1676
1677static int __init l2tp_init(void)
1678{
1679 int rc = 0;
1680
1681 rc = register_pernet_device(&l2tp_net_ops);
1682 if (rc)
1683 goto out;
1684
1685 l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0);
1686 if (!l2tp_wq) {
1687 pr_err("alloc_workqueue failed\n");
1688 unregister_pernet_device(&l2tp_net_ops);
1689 rc = -ENOMEM;
1690 goto out;
1691 }
1692
1693 pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);
1694
1695out:
1696 return rc;
1697}
1698
1699static void __exit l2tp_exit(void)
1700{
1701 unregister_pernet_device(&l2tp_net_ops);
1702 if (l2tp_wq) {
1703 destroy_workqueue(l2tp_wq);
1704 l2tp_wq = NULL;
1705 }
1706}
1707
/* Module entry points and metadata. */
module_init(l2tp_init);
module_exit(l2tp_exit);

MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("L2TP core");
MODULE_LICENSE("GPL");
MODULE_VERSION(L2TP_DRV_VERSION);
1715