/* L2TP core.
 *
 * L2TP (Layer 2 Tunneling Protocol) tunnel and session management.
 *
 * Author: James Chapman <jchapman@katalix.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

#include <linux/netdevice.h>
#include <linux/net.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/l2tp.h>
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/file.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <net/protocol.h>
#include <net/inet6_connection_sock.h>
#include <net/inet_ecn.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>

#include <asm/byteorder.h>
#include <linux/atomic.h>

#include "l2tp_core.h"

#define L2TP_DRV_VERSION	"V2.0"

#define L2TP_HDRFLAG_T		0x8000
#define L2TP_HDRFLAG_L		0x4000
#define L2TP_HDRFLAG_S		0x0800
#define L2TP_HDRFLAG_O		0x0200
#define L2TP_HDRFLAG_P		0x0100

#define L2TP_HDR_VER_MASK	0x000F
#define L2TP_HDR_VER_2		0x0002
#define L2TP_HDR_VER_3		0x0003

#define L2TP_SLFLAG_S		0x40000000
#define L2TP_SL_SEQ_MASK	0x00ffffff

#define L2TP_HDR_SIZE_MAX	14

#define L2TP_DEFAULT_DEBUG_FLAGS	0

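/* Private data stored for received packets in the skb. */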
struct l2tp_skb_cb {
	u32			ns;
	u16			has_seq;
	u16			length;
	unsigned long		expires;
};

#define L2TP_SKB_CB(skb)	((struct l2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)])

static struct workqueue_struct *l2tp_wq;

static unsigned int l2tp_net_id;
struct l2tp_net {
	struct list_head l2tp_tunnel_list;
	spinlock_t l2tp_tunnel_list_lock;
	struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
	spinlock_t l2tp_session_hlist_lock;
};

#if IS_ENABLED(CONFIG_IPV6)
static bool l2tp_sk_is_v6(struct sock *sk)
{
	return sk->sk_family == PF_INET6 &&
	       !ipv6_addr_v4mapped(&sk->sk_v6_daddr);
}
#endif

static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
{
	return sk->sk_user_data;
}

static inline struct l2tp_net *l2tp_pernet(const struct net *net)
{
	BUG_ON(!net);

	return net_generic(net, l2tp_net_id);
}

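/* Session hash global list for L2TPv3.
 * The session_id SHOULD be random according to RFC3931, but several
 * L2TP implementations use incrementing session_ids, so hash_32() is
 * used to spread entries across the table.
 */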
static inline struct hlist_head *
l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
{
	return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)];
}

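/* Session hash list kept per tunnel.
 * Entries are keyed by session_id; duplicate ids within a tunnel are
 * rejected by l2tp_session_register().
 */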
static inline struct hlist_head *
l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
{
	return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
}

void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
{
	sock_put(tunnel->sock);
}
EXPORT_SYMBOL(l2tp_tunnel_free);

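/* Look up a tunnel by tunnel_id in the per-net tunnel list.
 * A reference is taken on the returned tunnel; the caller must drop
 * it with l2tp_tunnel_dec_refcount() when done.
 */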
struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
{
	const struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_tunnel *tunnel;

	rcu_read_lock_bh();
	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
		if (tunnel->tunnel_id == tunnel_id &&
		    refcount_inc_not_zero(&tunnel->ref_count)) {
			rcu_read_unlock_bh();

			return tunnel;
		}
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_get);

struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
{
	const struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_tunnel *tunnel;
	int count = 0;

	rcu_read_lock_bh();
	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
		if (++count > nth &&
		    refcount_inc_not_zero(&tunnel->ref_count)) {
			rcu_read_unlock_bh();
			return tunnel;
		}
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_get_nth);

struct l2tp_session *l2tp_tunnel_get_session(struct l2tp_tunnel *tunnel,
					     u32 session_id)
{
	struct hlist_head *session_list;
	struct l2tp_session *session;

	session_list = l2tp_session_id_hash(tunnel, session_id);

	read_lock_bh(&tunnel->hlist_lock);
	hlist_for_each_entry(session, session_list, hlist)
		if (session->session_id == session_id) {
			l2tp_session_inc_refcount(session);
			read_unlock_bh(&tunnel->hlist_lock);

			return session;
		}
	read_unlock_bh(&tunnel->hlist_lock);

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_get_session);

struct l2tp_session *l2tp_session_get(const struct net *net, u32 session_id)
{
	struct hlist_head *session_list;
	struct l2tp_session *session;

	session_list = l2tp_session_id_hash_2(l2tp_pernet(net), session_id);

	rcu_read_lock_bh();
	hlist_for_each_entry_rcu(session, session_list, global_hlist)
		if (session->session_id == session_id) {
			l2tp_session_inc_refcount(session);
			rcu_read_unlock_bh();

			return session;
		}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_get);

struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth)
{
	int hash;
	struct l2tp_session *session;
	int count = 0;

	read_lock_bh(&tunnel->hlist_lock);
	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
		hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
			if (++count > nth) {
				l2tp_session_inc_refcount(session);
				read_unlock_bh(&tunnel->hlist_lock);
				return session;
			}
		}
	}

	read_unlock_bh(&tunnel->hlist_lock);

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_get_nth);

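/* Look up a session by interface name. Walks every bucket of the
 * per-net session hash, so it is comparatively slow and intended for
 * management use. A reference is held on any session returned.
 */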
struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
						const char *ifname)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	int hash;
	struct l2tp_session *session;

	rcu_read_lock_bh();
	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
		hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
			if (!strcmp(session->ifname, ifname)) {
				l2tp_session_inc_refcount(session);
				rcu_read_unlock_bh();

				return session;
			}
		}
	}

	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);

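/* Register a session with its parent tunnel. The session id must be
 * unique within the tunnel and, for L2TPv3, within the per-net global
 * session hash. A tunnel reference is taken on success.
 */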
int l2tp_session_register(struct l2tp_session *session,
			  struct l2tp_tunnel *tunnel)
{
	struct l2tp_session *session_walk;
	struct hlist_head *g_head;
	struct hlist_head *head;
	struct l2tp_net *pn;
	int err;

	head = l2tp_session_id_hash(tunnel, session->session_id);

	write_lock_bh(&tunnel->hlist_lock);
	if (!tunnel->acpt_newsess) {
		err = -ENODEV;
		goto err_tlock;
	}

	hlist_for_each_entry(session_walk, head, hlist)
		if (session_walk->session_id == session->session_id) {
			err = -EEXIST;
			goto err_tlock;
		}

	if (tunnel->version == L2TP_HDR_VER_3) {
		pn = l2tp_pernet(tunnel->l2tp_net);
		g_head = l2tp_session_id_hash_2(pn, session->session_id);

		spin_lock_bh(&pn->l2tp_session_hlist_lock);

		hlist_for_each_entry(session_walk, g_head, global_hlist)
			if (session_walk->session_id == session->session_id) {
				err = -EEXIST;
				goto err_tlock_pnlock;
			}

		l2tp_tunnel_inc_refcount(tunnel);
		hlist_add_head_rcu(&session->global_hlist, g_head);

		spin_unlock_bh(&pn->l2tp_session_hlist_lock);
	} else {
		l2tp_tunnel_inc_refcount(tunnel);
	}

	hlist_add_head(&session->hlist, head);
	write_unlock_bh(&tunnel->hlist_lock);

	return 0;

err_tlock_pnlock:
	spin_unlock_bh(&pn->l2tp_session_hlist_lock);
err_tlock:
	write_unlock_bh(&tunnel->hlist_lock);

	return err;
}
EXPORT_SYMBOL_GPL(l2tp_session_register);

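/*****************************************************************************
 * Receive data handling
 *****************************************************************************/

/* Insert skb into the session's reorder queue, kept sorted by ns so
 * that packets can be delivered in sequence later.
 */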
static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
{
	struct sk_buff *skbp;
	struct sk_buff *tmp;
	u32 ns = L2TP_SKB_CB(skb)->ns;

	spin_lock_bh(&session->reorder_q.lock);
	skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
		if (L2TP_SKB_CB(skbp)->ns > ns) {
			__skb_queue_before(&session->reorder_q, skbp, skb);
			l2tp_dbg(session, L2TP_MSG_SEQ,
				 "%s: pkt %u, inserted before %u, reorder_q len=%d\n",
				 session->name, ns, L2TP_SKB_CB(skbp)->ns,
				 skb_queue_len(&session->reorder_q));
			atomic_long_inc(&session->stats.rx_oos_packets);
			goto out;
		}
	}

	__skb_queue_tail(&session->reorder_q, skb);

out:
	spin_unlock_bh(&session->reorder_q.lock);
}

/* Dequeue a single skb, passing it either to the session's recv_skb
 * handler or freeing it if no handler is set.
 */
static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	int length = L2TP_SKB_CB(skb)->length;

	/* We're about to requeue the skb, so return resources
	 * to its current owner (a socket receive buffer).
	 */
	skb_orphan(skb);

	atomic_long_inc(&tunnel->stats.rx_packets);
	atomic_long_add(length, &tunnel->stats.rx_bytes);
	atomic_long_inc(&session->stats.rx_packets);
	atomic_long_add(length, &session->stats.rx_bytes);

	if (L2TP_SKB_CB(skb)->has_seq) {
		/* Bump our Nr */
		session->nr++;
		session->nr &= session->nr_max;

		l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated nr to %u\n",
			 session->name, session->nr);
	}

	/* Call the session's private receive handler */
	if (session->recv_skb != NULL)
		(*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
	else
		kfree_skb(skb);
}

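/* Dequeue skbs from the session's reorder queue, delivering them in
 * sequence. Packets that have waited longer than their expiry time
 * are dropped; delivery stops at the first packet whose ns does not
 * match the expected session->nr.
 */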
static void l2tp_recv_dequeue(struct l2tp_session *session)
{
	struct sk_buff *skb;
	struct sk_buff *tmp;

	/* If the pkt at the head of the queue has the nr that we
	 * expect to send up next, dequeue it and any other
	 * in-sequence packets behind it.
	 */
start:
	spin_lock_bh(&session->reorder_q.lock);
	skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
		if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
			atomic_long_inc(&session->stats.rx_seq_discards);
			atomic_long_inc(&session->stats.rx_errors);
			l2tp_dbg(session, L2TP_MSG_SEQ,
				 "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n",
				 session->name, L2TP_SKB_CB(skb)->ns,
				 L2TP_SKB_CB(skb)->length, session->nr,
				 skb_queue_len(&session->reorder_q));
			session->reorder_skip = 1;
			__skb_unlink(skb, &session->reorder_q);
			kfree_skb(skb);
			continue;
		}

		if (L2TP_SKB_CB(skb)->has_seq) {
			if (session->reorder_skip) {
				l2tp_dbg(session, L2TP_MSG_SEQ,
					 "%s: advancing nr to next pkt: %u -> %u",
					 session->name, session->nr,
					 L2TP_SKB_CB(skb)->ns);
				session->reorder_skip = 0;
				session->nr = L2TP_SKB_CB(skb)->ns;
			}
			if (L2TP_SKB_CB(skb)->ns != session->nr) {
				l2tp_dbg(session, L2TP_MSG_SEQ,
					 "%s: holding oos pkt %u len %d, waiting for %u, reorder_q_len=%d\n",
					 session->name, L2TP_SKB_CB(skb)->ns,
					 L2TP_SKB_CB(skb)->length, session->nr,
					 skb_queue_len(&session->reorder_q));
				goto out;
			}
		}
		__skb_unlink(skb, &session->reorder_q);

		/* Process the skb. We release the queue lock while we
		 * do so to let other contexts process the queue.
		 */
		spin_unlock_bh(&session->reorder_q.lock);
		l2tp_recv_dequeue_skb(session, skb);
		goto start;
	}

out:
	spin_unlock_bh(&session->reorder_q.lock);
}

static int l2tp_seq_check_rx_window(struct l2tp_session *session, u32 nr)
{
	u32 nws;

	if (nr >= session->nr)
		nws = nr - session->nr;
	else
		nws = (session->nr_max + 1) - (session->nr - nr);

	return nws < session->nr_window_size;
}

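/* Handle a data packet that carries sequence numbers. Packets outside
 * the receive window are dropped. With a reorder timeout configured
 * the packet is queued in ns order; otherwise only in-sequence packets
 * are accepted, with an out-of-sequence counter used to resync if the
 * peer skips ahead. Returns 0 if the packet was queued, 1 to discard.
 */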
static int l2tp_recv_data_seq(struct l2tp_session *session, struct sk_buff *skb)
{
	if (!l2tp_seq_check_rx_window(session, L2TP_SKB_CB(skb)->ns)) {
		/* Packet sequence number is outside allowed window.
		 * Discard it.
		 */
		l2tp_dbg(session, L2TP_MSG_SEQ,
			 "%s: pkt %u len %d discarded, outside window, nr=%u\n",
			 session->name, L2TP_SKB_CB(skb)->ns,
			 L2TP_SKB_CB(skb)->length, session->nr);
		goto discard;
	}

	if (session->reorder_timeout != 0) {
		/* Packet reordering enabled. Add skb to session's
		 * reorder queue, in order of ns.
		 */
		l2tp_recv_queue_skb(session, skb);
		goto out;
	}

	/* Packet reordering disabled. Discard out-of-sequence packets, while
	 * tracking the number of out-of-order packets since the last
	 * in-sequence packet so that we can resync if the peer stops sending
	 * what we expect.
	 */
	if (L2TP_SKB_CB(skb)->ns == session->nr) {
		skb_queue_tail(&session->reorder_q, skb);
	} else {
		u32 nr_oos = L2TP_SKB_CB(skb)->ns;
		u32 nr_next = (session->nr_oos + 1) & session->nr_max;

		if (nr_oos == nr_next)
			session->nr_oos_count++;
		else
			session->nr_oos_count = 0;

		session->nr_oos = nr_oos;
		if (session->nr_oos_count > session->nr_oos_count_max) {
			session->reorder_skip = 1;
			l2tp_dbg(session, L2TP_MSG_SEQ,
				 "%s: %d oos packets received. Resetting sequence numbers\n",
				 session->name, session->nr_oos_count);
		}
		if (!session->reorder_skip) {
			atomic_long_inc(&session->stats.rx_seq_discards);
			l2tp_dbg(session, L2TP_MSG_SEQ,
				 "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n",
				 session->name, L2TP_SKB_CB(skb)->ns,
				 L2TP_SKB_CB(skb)->length, session->nr,
				 skb_queue_len(&session->reorder_q));
			goto discard;
		}
		skb_queue_tail(&session->reorder_q, skb);
	}

out:
	return 0;

discard:
	return 1;
}

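/* Do receive processing of L2TP data frames that are common to L2TPv2
 * and L2TPv3. On entry, ptr points just past the session id field of
 * the verified L2TP header and optr points to its start. This checks
 * the optional cookie, extracts ns/nr sequence numbers if present,
 * enables or disables local sequence numbering to follow the peer
 * where permitted, strips the remaining header and feeds the packet
 * to the reorder/dequeue machinery.
 */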
void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
		      unsigned char *ptr, unsigned char *optr, u16 hdrflags,
		      int length)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	int offset;
	u32 ns, nr;

	/* Parse and check optional cookie */
	if (session->peer_cookie_len > 0) {
		if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
			l2tp_info(tunnel, L2TP_MSG_DATA,
				  "%s: cookie mismatch (%u/%u). Discarding.\n",
				  tunnel->name, tunnel->tunnel_id,
				  session->session_id);
			atomic_long_inc(&session->stats.rx_cookie_discards);
			goto discard;
		}
		ptr += session->peer_cookie_len;
	}

	/* Handle the optional sequence numbers. Sequence numbers are
	 * in different places for L2TPv2 and L2TPv3.
	 *
	 * If we are the LAC, enable/disable sequence numbers under
	 * the control of the LNS. If no sequence numbers present but
	 * we were expecting them, discard frame.
	 */
	ns = nr = 0;
	L2TP_SKB_CB(skb)->has_seq = 0;
	if (tunnel->version == L2TP_HDR_VER_2) {
		if (hdrflags & L2TP_HDRFLAG_S) {
			ns = ntohs(*(__be16 *) ptr);
			ptr += 2;
			nr = ntohs(*(__be16 *) ptr);
			ptr += 2;

			/* Store L2TP info in the skb */
			L2TP_SKB_CB(skb)->ns = ns;
			L2TP_SKB_CB(skb)->has_seq = 1;

			l2tp_dbg(session, L2TP_MSG_SEQ,
				 "%s: recv data ns=%u, nr=%u, session nr=%u\n",
				 session->name, ns, nr, session->nr);
		}
	} else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
		u32 l2h = ntohl(*(__be32 *) ptr);

		if (l2h & 0x40000000) {
			ns = l2h & 0x00ffffff;

			/* Store L2TP info in the skb */
			L2TP_SKB_CB(skb)->ns = ns;
			L2TP_SKB_CB(skb)->has_seq = 1;

			l2tp_dbg(session, L2TP_MSG_SEQ,
				 "%s: recv data ns=%u, session nr=%u\n",
				 session->name, ns, session->nr);
		}
		ptr += 4;
	}

	if (L2TP_SKB_CB(skb)->has_seq) {
		/* Received a packet with sequence numbers. If we're the LAC,
		 * check if we are sending sequence numbers and if not,
		 * configure it so.
		 */
		if ((!session->lns_mode) && (!session->send_seq)) {
			l2tp_info(session, L2TP_MSG_SEQ,
				  "%s: requested to enable seq numbers by LNS\n",
				  session->name);
			session->send_seq = 1;
			l2tp_session_set_header_len(session, tunnel->version);
		}
	} else {
		/* No sequence numbers.
		 * If user has configured mandatory sequence numbers, discard.
		 */
		if (session->recv_seq) {
			l2tp_warn(session, L2TP_MSG_SEQ,
				  "%s: recv data has no seq numbers when required. Discarding.\n",
				  session->name);
			atomic_long_inc(&session->stats.rx_seq_discards);
			goto discard;
		}

		/* If we're the LAC and we're sending sequence numbers, the
		 * LNS has requested that we no longer send sequence numbers.
		 * If we're the LNS and we're sending sequence numbers, the
		 * LAC is broken. Discard the frame.
		 */
		if ((!session->lns_mode) && (session->send_seq)) {
			l2tp_info(session, L2TP_MSG_SEQ,
				  "%s: requested to disable seq numbers by LNS\n",
				  session->name);
			session->send_seq = 0;
			l2tp_session_set_header_len(session, tunnel->version);
		} else if (session->send_seq) {
			l2tp_warn(session, L2TP_MSG_SEQ,
				  "%s: recv data has no seq numbers when required. Discarding.\n",
				  session->name);
			atomic_long_inc(&session->stats.rx_seq_discards);
			goto discard;
		}
	}

	/* Session data offset is defined only for L2TPv2 and is
	 * indicated by an optional 16-bit value in the header.
	 */
	if (tunnel->version == L2TP_HDR_VER_2) {
		/* If offset bit set, skip it. */
		if (hdrflags & L2TP_HDRFLAG_O) {
			offset = ntohs(*(__be16 *)ptr);
			ptr += 2 + offset;
		}
	}

	offset = ptr - optr;
	if (!pskb_may_pull(skb, offset))
		goto discard;

	__skb_pull(skb, offset);

	/* Prepare skb for adding to the session's reorder_q. Hold
	 * packets for max reorder_timeout or 1 second if not
	 * reordering.
	 */
	L2TP_SKB_CB(skb)->length = length;
	L2TP_SKB_CB(skb)->expires = jiffies +
		(session->reorder_timeout ? session->reorder_timeout : HZ);

	/* Add packet to the session's receive queue. Reordering is done
	 * here, if enabled.
	 */
	if (L2TP_SKB_CB(skb)->has_seq) {
		if (l2tp_recv_data_seq(session, skb))
			goto discard;
	} else {
		/* No sequence numbers. Add the skb to the tail of the
		 * reorder queue. This ensures that all packets are
		 * delivered in order.
		 */
		skb_queue_tail(&session->reorder_q, skb);
	}

	/* Try to dequeue as many skbs from reorder_q as we can. */
	l2tp_recv_dequeue(session);

	return;

discard:
	atomic_long_inc(&session->stats.rx_errors);
	kfree_skb(skb);
}
EXPORT_SYMBOL(l2tp_recv_common);

/* Drop skbs from the session's reorder_q */
static int l2tp_session_queue_purge(struct l2tp_session *session)
{
	struct sk_buff *skb = NULL;

	BUG_ON(!session);
	BUG_ON(session->magic != L2TP_SESSION_MAGIC);
	while ((skb = skb_dequeue(&session->reorder_q))) {
		atomic_long_inc(&session->stats.rx_errors);
		kfree_skb(skb);
	}
	return 0;
}

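/* Internal UDP receive frame. Do the real work of receiving an L2TP
 * data frame here. The skb is not on a list when we get here.
 * Returns 0 if the frame was consumed, or 1 if it should be passed up
 * to userspace as an ordinary UDP packet (the UDP header is restored
 * in that case).
 */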
static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
{
	struct l2tp_session *session = NULL;
	unsigned char *ptr, *optr;
	u16 hdrflags;
	u32 tunnel_id, session_id;
	u16 version;
	int length;

	/* UDP always verifies the packet length. */
	__skb_pull(skb, sizeof(struct udphdr));

	/* Short packet? */
	if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
		l2tp_info(tunnel, L2TP_MSG_DATA,
			  "%s: recv short packet (len=%d)\n",
			  tunnel->name, skb->len);
		goto error;
	}

	/* Trace packet contents, if enabled */
	if (tunnel->debug & L2TP_MSG_DATA) {
		length = min(32u, skb->len);
		if (!pskb_may_pull(skb, length))
			goto error;

		pr_debug("%s: recv\n", tunnel->name);
		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
	}

	/* Point to L2TP header */
	optr = ptr = skb->data;

	/* Get L2TP header flags */
	hdrflags = ntohs(*(__be16 *) ptr);

	/* Check protocol version */
	version = hdrflags & L2TP_HDR_VER_MASK;
	if (version != tunnel->version) {
		l2tp_info(tunnel, L2TP_MSG_DATA,
			  "%s: recv protocol version mismatch: got %d expected %d\n",
			  tunnel->name, version, tunnel->version);
		goto error;
	}

	/* Get length of L2TP packet */
	length = skb->len;

	/* If type is control packet, it is handled by userspace. */
	if (hdrflags & L2TP_HDRFLAG_T) {
		l2tp_dbg(tunnel, L2TP_MSG_DATA,
			 "%s: recv control packet, len=%d\n",
			 tunnel->name, length);
		goto error;
	}

	/* Skip flags */
	ptr += 2;

	if (tunnel->version == L2TP_HDR_VER_2) {
		/* If length is present, skip it */
		if (hdrflags & L2TP_HDRFLAG_L)
			ptr += 2;

		/* Extract tunnel and session ID */
		tunnel_id = ntohs(*(__be16 *) ptr);
		ptr += 2;
		session_id = ntohs(*(__be16 *) ptr);
		ptr += 2;
	} else {
		ptr += 2;	/* skip reserved bits */
		tunnel_id = tunnel->tunnel_id;
		session_id = ntohl(*(__be32 *) ptr);
		ptr += 4;
	}

	/* Find the session context */
	session = l2tp_tunnel_get_session(tunnel, session_id);
	if (!session || !session->recv_skb) {
		if (session)
			l2tp_session_dec_refcount(session);

		/* Not found? Pass to userspace to deal with */
		l2tp_info(tunnel, L2TP_MSG_DATA,
			  "%s: no session found (%u/%u). Passing up.\n",
			  tunnel->name, tunnel_id, session_id);
		goto error;
	}

	if (tunnel->version == L2TP_HDR_VER_3 &&
	    l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
		goto error;

	l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
	l2tp_session_dec_refcount(session);

	return 0;

error:
	/* Put UDP header back */
	__skb_push(skb, sizeof(struct udphdr));

	return 1;
}

/* UDP encapsulation receive handler. See net/ipv4/udp.c.
 * Return codes:
 * 0 : success.
 * >0: skb should be passed up to the socket unmodified.
 */
int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct l2tp_tunnel *tunnel;

	tunnel = rcu_dereference_sk_user_data(sk);
	if (tunnel == NULL)
		goto pass_up;

	l2tp_dbg(tunnel, L2TP_MSG_DATA, "%s: received %d bytes\n",
		 tunnel->name, skb->len);

	if (l2tp_udp_recv_core(tunnel, skb))
		goto pass_up;

	return 0;

pass_up:
	return 1;
}
EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);

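/*****************************************************************************
 * Transmit handling
 *****************************************************************************/

/* Build an L2TPv2 data packet header in *buf and return its length.
 * The session's ns counter is advanced here when sequence numbers are
 * in use.
 */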
static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	__be16 *bufp = buf;
	__be16 *optr = buf;
	u16 flags = L2TP_HDR_VER_2;
	u32 tunnel_id = tunnel->peer_tunnel_id;
	u32 session_id = session->peer_session_id;

	if (session->send_seq)
		flags |= L2TP_HDRFLAG_S;

	/* Setup L2TP header. */
	*bufp++ = htons(flags);
	*bufp++ = htons(tunnel_id);
	*bufp++ = htons(session_id);
	if (session->send_seq) {
		*bufp++ = htons(session->ns);
		*bufp++ = 0;
		session->ns++;
		session->ns &= 0xffff;
		l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated ns to %u\n",
			 session->name, session->ns);
	}

	return bufp - optr;
}

static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	char *bufp = buf;
	char *optr = bufp;

	/* Setup L2TP header. The header differs slightly for UDP and
	 * IP encapsulations. For UDP, the header includes the
	 * optional ver/t/l/s/o fields. For IP, the session id is
	 * placed first, since it is used to look up the session.
	 */
	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
		u16 flags = L2TP_HDR_VER_3;
		*((__be16 *) bufp) = htons(flags);
		bufp += 2;
		*((__be16 *) bufp) = 0;
		bufp += 2;
	}

	*((__be32 *) bufp) = htonl(session->peer_session_id);
	bufp += 4;
	if (session->cookie_len) {
		memcpy(bufp, &session->cookie[0], session->cookie_len);
		bufp += session->cookie_len;
	}
	if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
		u32 l2h = 0;

		if (session->send_seq) {
			l2h = 0x40000000 | session->ns;
			session->ns++;
			session->ns &= 0xffffff;
			l2tp_dbg(session, L2TP_MSG_SEQ,
				 "%s: updated ns to %u\n",
				 session->name, session->ns);
		}

		*((__be32 *)bufp) = htonl(l2h);
		bufp += 4;
	}

	return bufp - optr;
}

static void l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
			   struct flowi *fl, size_t data_len)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	unsigned int len = skb->len;
	int error;

	/* Debug */
	if (session->send_seq)
		l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %zd bytes, ns=%u\n",
			 session->name, data_len, session->ns - 1);
	else
		l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %zd bytes\n",
			 session->name, data_len);

	if (session->debug & L2TP_MSG_DATA) {
		int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
		unsigned char *datap = skb->data + uhlen;

		pr_debug("%s: xmit\n", session->name);
		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
				     datap, min_t(size_t, 32, len - uhlen));
	}

	/* Queue the packet to IP for output */
	skb->ignore_df = 1;
#if IS_ENABLED(CONFIG_IPV6)
	if (l2tp_sk_is_v6(tunnel->sock))
		error = inet6_csk_xmit(tunnel->sock, skb, NULL);
	else
#endif
		error = ip_queue_xmit(tunnel->sock, skb, fl);

	/* Update stats */
	if (error >= 0) {
		atomic_long_inc(&tunnel->stats.tx_packets);
		atomic_long_add(len, &tunnel->stats.tx_bytes);
		atomic_long_inc(&session->stats.tx_packets);
		atomic_long_add(len, &session->stats.tx_bytes);
	} else {
		atomic_long_inc(&tunnel->stats.tx_errors);
		atomic_long_inc(&session->stats.tx_errors);
	}
}

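/* Transmit entry point used by pseudowire drivers. If the caller
 * requires the skb to carry a PPP header, that header must already be
 * in the skb data before calling this function. This grows headroom
 * if needed, builds the session's L2TP header, adds the UDP header
 * for UDP encapsulation and queues the packet on the tunnel socket.
 */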
int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len)
{
	int data_len = skb->len;
	struct l2tp_tunnel *tunnel = session->tunnel;
	struct sock *sk = tunnel->sock;
	struct flowi *fl;
	struct udphdr *uh;
	struct inet_sock *inet;
	int headroom;
	int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
	int udp_len;
	int ret = NET_XMIT_SUCCESS;

	/* Check that there's enough headroom in the skb to insert IP,
	 * UDP and L2TP headers. If not enough, expand it to
	 * make room.
	 */
	headroom = NET_SKB_PAD + sizeof(struct iphdr) +
		   uhlen + hdr_len;
	if (skb_cow_head(skb, headroom)) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	/* Setup L2TP header */
	session->build_header(session, __skb_push(skb, hdr_len));

	/* Reset skb netfilter state */
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	nf_reset(skb);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		kfree_skb(skb);
		ret = NET_XMIT_DROP;
		goto out_unlock;
	}

	/* The user-space may change the connection status for the user-space
	 * provided socket at run time: we must check it under the socket lock
	 */
	if (tunnel->fd >= 0 && sk->sk_state != TCP_ESTABLISHED) {
		kfree_skb(skb);
		ret = NET_XMIT_DROP;
		goto out_unlock;
	}

	/* Get routing info from the tunnel socket */
	skb_dst_drop(skb);
	skb_dst_set(skb, sk_dst_check(sk, 0));

	inet = inet_sk(sk);
	fl = &inet->cork.fl;
	switch (tunnel->encap) {
	case L2TP_ENCAPTYPE_UDP:
		/* Setup UDP header */
		__skb_push(skb, sizeof(*uh));
		skb_reset_transport_header(skb);
		uh = udp_hdr(skb);
		uh->source = inet->inet_sport;
		uh->dest = inet->inet_dport;
		udp_len = uhlen + hdr_len + data_len;
		uh->len = htons(udp_len);

		/* Calculate UDP checksum if configured to do so */
#if IS_ENABLED(CONFIG_IPV6)
		if (l2tp_sk_is_v6(sk))
			udp6_set_csum(udp_get_no_check6_tx(sk),
				      skb, &inet6_sk(sk)->saddr,
				      &sk->sk_v6_daddr, udp_len);
		else
#endif
			udp_set_csum(sk->sk_no_check_tx, skb, inet->inet_saddr,
				     inet->inet_daddr, udp_len);
		break;

	case L2TP_ENCAPTYPE_IP:
		break;
	}

	l2tp_xmit_core(session, skb, fl, data_len);
out_unlock:
	bh_unlock_sock(sk);

	return ret;
}
EXPORT_SYMBOL_GPL(l2tp_xmit_skb);

/*****************************************************************************
 * Tunnel and session create/destroy
 *****************************************************************************/

/* Tunnel socket destruct hook.
 * The tunnel context is deleted only when all session sockets have
 * been closed.
 */
static void l2tp_tunnel_destruct(struct sock *sk)
{
	struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);

	if (tunnel == NULL)
		goto end;

	l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name);

	/* Disable udp encapsulation */
	switch (tunnel->encap) {
	case L2TP_ENCAPTYPE_UDP:
		/* No longer an encapsulation socket. See net/ipv4/udp.c */
		(udp_sk(sk))->encap_type = 0;
		(udp_sk(sk))->encap_rcv = NULL;
		(udp_sk(sk))->encap_destroy = NULL;
		break;
	case L2TP_ENCAPTYPE_IP:
		break;
	}

	/* Remove hooks into tunnel socket */
	sk->sk_destruct = tunnel->old_sk_destruct;
	sk->sk_user_data = NULL;

	/* Call the original destructor */
	if (sk->sk_destruct)
		(*sk->sk_destruct)(sk);

	kfree_rcu(tunnel, rcu);
end:
	return;
}

/* When the tunnel is closed, all the attached sessions need to go too. */
static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
{
	int hash;
	struct hlist_node *walk;
	struct hlist_node *tmp;
	struct l2tp_session *session;

	BUG_ON(tunnel == NULL);

	l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing all sessions...\n",
		  tunnel->name);

	write_lock_bh(&tunnel->hlist_lock);
	tunnel->acpt_newsess = false;
	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
again:
		hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
			session = hlist_entry(walk, struct l2tp_session, hlist);

			l2tp_info(session, L2TP_MSG_CONTROL,
				  "%s: closing session\n", session->name);

			hlist_del_init(&session->hlist);

			if (test_and_set_bit(0, &session->dead))
				goto again;

			write_unlock_bh(&tunnel->hlist_lock);

			__l2tp_session_unhash(session);
			l2tp_session_queue_purge(session);

			if (session->session_close != NULL)
				(*session->session_close)(session);

			l2tp_session_dec_refcount(session);

			write_lock_bh(&tunnel->hlist_lock);

			/* Now restart from the beginning of this hash
			 * chain. We always remove a session from the
			 * list so we are guaranteed to make forward
			 * progress.
			 */
			goto again;
		}
	}
	write_unlock_bh(&tunnel->hlist_lock);
}

/* Tunnel socket destroy hook for UDP encapsulation */
static void l2tp_udp_encap_destroy(struct sock *sk)
{
	struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);

	if (tunnel)
		l2tp_tunnel_delete(tunnel);
}

/* Workqueue tunnel deletion function */
static void l2tp_tunnel_del_work(struct work_struct *work)
{
	struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel,
						  del_work);
	struct sock *sk = tunnel->sock;
	struct socket *sock = sk->sk_socket;
	struct l2tp_net *pn;

	l2tp_tunnel_closeall(tunnel);

	/* If the tunnel socket was created within the kernel,
	 * release it here.
	 */
	if (tunnel->fd < 0) {
		if (sock) {
			kernel_sock_shutdown(sock, SHUT_RDWR);
			sock_release(sock);
		}
	}

	/* Remove the tunnel struct from the tunnel list */
	pn = l2tp_pernet(tunnel->l2tp_net);
	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
	list_del_rcu(&tunnel->list);
	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);

	/* drop initial ref */
	l2tp_tunnel_dec_refcount(tunnel);

	/* drop workqueue ref */
	l2tp_tunnel_dec_refcount(tunnel);
}

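/* Create a kernel socket for an unmanaged tunnel, i.e. one for which
 * userspace did not supply an existing fd. Handles UDP and raw IP
 * (IPPROTO_L2TP) encapsulation over IPv4 or IPv6.
 */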
static int l2tp_tunnel_sock_create(struct net *net,
				   u32 tunnel_id,
				   u32 peer_tunnel_id,
				   struct l2tp_tunnel_cfg *cfg,
				   struct socket **sockp)
{
	int err = -EINVAL;
	struct socket *sock = NULL;
	struct udp_port_cfg udp_conf;

	switch (cfg->encap) {
	case L2TP_ENCAPTYPE_UDP:
		memset(&udp_conf, 0, sizeof(udp_conf));

#if IS_ENABLED(CONFIG_IPV6)
		if (cfg->local_ip6 && cfg->peer_ip6) {
			udp_conf.family = AF_INET6;
			memcpy(&udp_conf.local_ip6, cfg->local_ip6,
			       sizeof(udp_conf.local_ip6));
			memcpy(&udp_conf.peer_ip6, cfg->peer_ip6,
			       sizeof(udp_conf.peer_ip6));
			udp_conf.use_udp6_tx_checksums =
				!cfg->udp6_zero_tx_checksums;
			udp_conf.use_udp6_rx_checksums =
				!cfg->udp6_zero_rx_checksums;
		} else
#endif
		{
			udp_conf.family = AF_INET;
			udp_conf.local_ip = cfg->local_ip;
			udp_conf.peer_ip = cfg->peer_ip;
			udp_conf.use_udp_checksums = cfg->use_udp_checksums;
		}

		udp_conf.local_udp_port = htons(cfg->local_udp_port);
		udp_conf.peer_udp_port = htons(cfg->peer_udp_port);

		err = udp_sock_create(net, &udp_conf, &sock);
		if (err < 0)
			goto out;

		break;

	case L2TP_ENCAPTYPE_IP:
#if IS_ENABLED(CONFIG_IPV6)
		if (cfg->local_ip6 && cfg->peer_ip6) {
			struct sockaddr_l2tpip6 ip6_addr = {0};

			err = sock_create_kern(net, AF_INET6, SOCK_DGRAM,
					       IPPROTO_L2TP, &sock);
			if (err < 0)
				goto out;

			ip6_addr.l2tp_family = AF_INET6;
			memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
			       sizeof(ip6_addr.l2tp_addr));
			ip6_addr.l2tp_conn_id = tunnel_id;
			err = kernel_bind(sock, (struct sockaddr *) &ip6_addr,
					  sizeof(ip6_addr));
			if (err < 0)
				goto out;

			ip6_addr.l2tp_family = AF_INET6;
			memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6,
			       sizeof(ip6_addr.l2tp_addr));
			ip6_addr.l2tp_conn_id = peer_tunnel_id;
			err = kernel_connect(sock,
					     (struct sockaddr *) &ip6_addr,
					     sizeof(ip6_addr), 0);
			if (err < 0)
				goto out;
		} else
#endif
		{
			struct sockaddr_l2tpip ip_addr = {0};

			err = sock_create_kern(net, AF_INET, SOCK_DGRAM,
					       IPPROTO_L2TP, &sock);
			if (err < 0)
				goto out;

			ip_addr.l2tp_family = AF_INET;
			ip_addr.l2tp_addr = cfg->local_ip;
			ip_addr.l2tp_conn_id = tunnel_id;
			err = kernel_bind(sock, (struct sockaddr *) &ip_addr,
					  sizeof(ip_addr));
			if (err < 0)
				goto out;

			ip_addr.l2tp_family = AF_INET;
			ip_addr.l2tp_addr = cfg->peer_ip;
			ip_addr.l2tp_conn_id = peer_tunnel_id;
			err = kernel_connect(sock, (struct sockaddr *) &ip_addr,
					     sizeof(ip_addr), 0);
			if (err < 0)
				goto out;
		}
		break;

	default:
		goto out;
	}

out:
	*sockp = sock;
	if ((err < 0) && sock) {
		kernel_sock_shutdown(sock, SHUT_RDWR);
		sock_release(sock);
		*sockp = NULL;
	}

	return err;
}

static struct lock_class_key l2tp_socket_class;

int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
		       u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
		       struct l2tp_tunnel **tunnelp)
{
	struct l2tp_tunnel *tunnel = NULL;
	int err;
	enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;

	if (cfg != NULL)
		encap = cfg->encap;

	tunnel = kzalloc(sizeof(struct l2tp_tunnel), GFP_KERNEL);
	if (tunnel == NULL) {
		err = -ENOMEM;
		goto err;
	}

	tunnel->version = version;
	tunnel->tunnel_id = tunnel_id;
	tunnel->peer_tunnel_id = peer_tunnel_id;
	tunnel->debug = L2TP_DEFAULT_DEBUG_FLAGS;

	tunnel->magic = L2TP_TUNNEL_MAGIC;
	sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
	rwlock_init(&tunnel->hlist_lock);
	tunnel->acpt_newsess = true;

	if (cfg != NULL)
		tunnel->debug = cfg->debug;

	tunnel->encap = encap;

	refcount_set(&tunnel->ref_count, 1);
	tunnel->fd = fd;

	INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);

	INIT_LIST_HEAD(&tunnel->list);

	err = 0;
err:
	if (tunnelp)
		*tunnelp = tunnel;

	return err;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_create);

static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
				enum l2tp_encap_type encap)
{
	if (!net_eq(sock_net(sk), net))
		return -EINVAL;

	if (sk->sk_type != SOCK_DGRAM)
		return -EPROTONOSUPPORT;

	if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) ||
	    (encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP))
		return -EPROTONOSUPPORT;

	if (sk->sk_user_data)
		return -EBUSY;

	return 0;
}

int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
			 struct l2tp_tunnel_cfg *cfg)
{
	struct l2tp_tunnel *tunnel_walk;
	struct l2tp_net *pn;
	struct socket *sock;
	struct sock *sk;
	int ret;

	if (tunnel->fd < 0) {
		ret = l2tp_tunnel_sock_create(net, tunnel->tunnel_id,
					      tunnel->peer_tunnel_id, cfg,
					      &sock);
		if (ret < 0)
			goto err;
	} else {
		sock = sockfd_lookup(tunnel->fd, &ret);
		if (!sock)
			goto err;

		ret = l2tp_validate_socket(sock->sk, net, tunnel->encap);
		if (ret < 0)
			goto err_sock;
	}

	tunnel->l2tp_net = net;
	pn = l2tp_pernet(net);

	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
	list_for_each_entry(tunnel_walk, &pn->l2tp_tunnel_list, list) {
		if (tunnel_walk->tunnel_id == tunnel->tunnel_id) {
			spin_unlock_bh(&pn->l2tp_tunnel_list_lock);

			ret = -EEXIST;
			goto err_sock;
		}
	}
	list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);

	sk = sock->sk;
	sock_hold(sk);
	tunnel->sock = sk;

	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
		struct udp_tunnel_sock_cfg udp_cfg = {
			.sk_user_data = tunnel,
			.encap_type = UDP_ENCAP_L2TPINUDP,
			.encap_rcv = l2tp_udp_encap_recv,
			.encap_destroy = l2tp_udp_encap_destroy,
		};

		setup_udp_tunnel_sock(net, sock, &udp_cfg);
	} else {
		sk->sk_user_data = tunnel;
	}

	tunnel->old_sk_destruct = sk->sk_destruct;
	sk->sk_destruct = &l2tp_tunnel_destruct;
	lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class,
				   "l2tp_sock");
	sk->sk_allocation = GFP_ATOMIC;

	if (tunnel->fd >= 0)
		sockfd_put(sock);

	return 0;

err_sock:
	if (tunnel->fd < 0)
		sock_release(sock);
	else
		sockfd_put(sock);
err:
	return ret;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_register);

/* This function is used by the netlink TUNNEL_DELETE command. */
void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
{
	if (!test_and_set_bit(0, &tunnel->dead)) {
		l2tp_tunnel_inc_refcount(tunnel);
		queue_work(l2tp_wq, &tunnel->del_work);
	}
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);

/* Really kill the session. */
void l2tp_session_free(struct l2tp_session *session)
{
	struct l2tp_tunnel *tunnel = session->tunnel;

	BUG_ON(refcount_read(&session->ref_count) != 0);

	if (tunnel) {
		BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
		l2tp_tunnel_dec_refcount(tunnel);
	}

	kfree(session);
}
EXPORT_SYMBOL_GPL(l2tp_session_free);

/* Remove an l2tp session from l2tp_core's hash lists. */
void __l2tp_session_unhash(struct l2tp_session *session)
{
	struct l2tp_tunnel *tunnel = session->tunnel;

	if (tunnel) {
		/* Remove from the per-tunnel hash */
		write_lock_bh(&tunnel->hlist_lock);
		hlist_del_init(&session->hlist);
		write_unlock_bh(&tunnel->hlist_lock);

		/* For L2TPv3 we have a per-net hash: remove from there, too */
		if (tunnel->version != L2TP_HDR_VER_2) {
			struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);

			spin_lock_bh(&pn->l2tp_session_hlist_lock);
			hlist_del_init_rcu(&session->global_hlist);
			spin_unlock_bh(&pn->l2tp_session_hlist_lock);
			synchronize_rcu();
		}
	}
}
EXPORT_SYMBOL_GPL(__l2tp_session_unhash);

/* This function is used by the netlink SESSION_DELETE command and by
 * pseudowire modules.
 */
int l2tp_session_delete(struct l2tp_session *session)
{
	if (test_and_set_bit(0, &session->dead))
		return 0;

	__l2tp_session_unhash(session);
	l2tp_session_queue_purge(session);
	if (session->session_close != NULL)
		(*session->session_close)(session);

	l2tp_session_dec_refcount(session);

	return 0;
}
EXPORT_SYMBOL_GPL(l2tp_session_delete);

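/* Recalculate the session's data header length. Called whenever a
 * parameter that affects the header size (send_seq, cookie length,
 * L2-specific sublayer type) changes.
 */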
void l2tp_session_set_header_len(struct l2tp_session *session, int version)
{
	if (version == L2TP_HDR_VER_2) {
		session->hdr_len = 6;
		if (session->send_seq)
			session->hdr_len += 4;
	} else {
		session->hdr_len = 4 + session->cookie_len;
		session->hdr_len += l2tp_get_l2specific_len(session);
		if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
			session->hdr_len += 4;
	}
}
EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);

struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel,
					 u32 session_id, u32 peer_session_id,
					 struct l2tp_session_cfg *cfg)
{
	struct l2tp_session *session;

	session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
	if (session != NULL) {
		session->magic = L2TP_SESSION_MAGIC;
		session->tunnel = tunnel;

		session->session_id = session_id;
		session->peer_session_id = peer_session_id;
		session->nr = 0;
		if (tunnel->version == L2TP_HDR_VER_2)
			session->nr_max = 0xffff;
		else
			session->nr_max = 0xffffff;
		session->nr_window_size = session->nr_max / 2;
		session->nr_oos_count_max = 4;

		/* Use NR of first received packet */
		session->reorder_skip = 1;

		sprintf(&session->name[0], "sess %u/%u",
			tunnel->tunnel_id, session->session_id);

		skb_queue_head_init(&session->reorder_q);

		INIT_HLIST_NODE(&session->hlist);
		INIT_HLIST_NODE(&session->global_hlist);

		/* Inherit debug options from tunnel */
		session->debug = tunnel->debug;

		if (cfg) {
			session->pwtype = cfg->pw_type;
			session->debug = cfg->debug;
			session->send_seq = cfg->send_seq;
			session->recv_seq = cfg->recv_seq;
			session->lns_mode = cfg->lns_mode;
			session->reorder_timeout = cfg->reorder_timeout;
			session->l2specific_type = cfg->l2specific_type;
			session->cookie_len = cfg->cookie_len;
			memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
			session->peer_cookie_len = cfg->peer_cookie_len;
			memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
		}

		if (tunnel->version == L2TP_HDR_VER_2)
			session->build_header = l2tp_build_l2tpv2_header;
		else
			session->build_header = l2tp_build_l2tpv3_header;

		l2tp_session_set_header_len(session, tunnel->version);

		refcount_set(&session->ref_count, 1);

		return session;
	}

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(l2tp_session_create);

/*****************************************************************************
 * Init and cleanup
 *****************************************************************************/

static __net_init int l2tp_init_net(struct net *net)
{
	struct l2tp_net *pn = net_generic(net, l2tp_net_id);
	int hash;

	INIT_LIST_HEAD(&pn->l2tp_tunnel_list);
	spin_lock_init(&pn->l2tp_tunnel_list_lock);

	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
		INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]);

	spin_lock_init(&pn->l2tp_session_hlist_lock);

	return 0;
}

static __net_exit void l2tp_exit_net(struct net *net)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_tunnel *tunnel = NULL;
	int hash;

	rcu_read_lock_bh();
	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
		l2tp_tunnel_delete(tunnel);
	}
	rcu_read_unlock_bh();

	if (l2tp_wq)
		flush_workqueue(l2tp_wq);
	rcu_barrier();

	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
		WARN_ON_ONCE(!hlist_empty(&pn->l2tp_session_hlist[hash]));
}

static struct pernet_operations l2tp_net_ops = {
	.init = l2tp_init_net,
	.exit = l2tp_exit_net,
	.id   = &l2tp_net_id,
	.size = sizeof(struct l2tp_net),
};

static int __init l2tp_init(void)
{
	int rc = 0;

	rc = register_pernet_device(&l2tp_net_ops);
	if (rc)
		goto out;

	l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0);
	if (!l2tp_wq) {
		pr_err("alloc_workqueue failed\n");
		unregister_pernet_device(&l2tp_net_ops);
		rc = -ENOMEM;
		goto out;
	}

	pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);

out:
	return rc;
}

static void __exit l2tp_exit(void)
{
	unregister_pernet_device(&l2tp_net_ops);
	if (l2tp_wq) {
		destroy_workqueue(l2tp_wq);
		l2tp_wq = NULL;
	}
}

module_init(l2tp_init);
module_exit(l2tp_exit);

MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("L2TP core");
MODULE_LICENSE("GPL");
MODULE_VERSION(L2TP_DRV_VERSION);