/* L2TP core.
 *
 * Shared L2TPv2/L2TPv3 tunnel and session handling used by the L2TP
 * pseudowire drivers and the L2TP IP/UDP socket code.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

#include <linux/netdevice.h>
#include <linux/net.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/l2tp.h>
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/file.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <net/protocol.h>
#include <net/inet6_connection_sock.h>
#include <net/inet_ecn.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>

#include <asm/byteorder.h>
#include <linux/atomic.h>

#include "l2tp_core.h"

#define L2TP_DRV_VERSION	"V2.0"

/* L2TP header flag and version bits */
#define L2TP_HDRFLAG_T		0x8000
#define L2TP_HDRFLAG_L		0x4000
#define L2TP_HDRFLAG_S		0x0800
#define L2TP_HDRFLAG_O		0x0200
#define L2TP_HDRFLAG_P		0x0100

#define L2TP_HDR_VER_MASK	0x000F
#define L2TP_HDR_VER_2		0x0002
#define L2TP_HDR_VER_3		0x0003

/* L2TPv3 default L2-specific sublayer */
#define L2TP_SLFLAG_S		0x40000000
#define L2TP_SL_SEQ_MASK	0x00ffffff

/* L2TPv2 data header size with and without sequence numbers */
#define L2TP_HDR_SIZE_SEQ	10
#define L2TP_HDR_SIZE_NOSEQ	6

/* Default trace flags */
#define L2TP_DEFAULT_DEBUG_FLAGS	0

/* Private data stored for received packets in the skb. */
struct l2tp_skb_cb {
	u32			ns;
	u16			has_seq;
	u16			length;
	unsigned long		expires;
};

#define L2TP_SKB_CB(skb)	((struct l2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)])

static atomic_t l2tp_tunnel_count;
static atomic_t l2tp_session_count;
static struct workqueue_struct *l2tp_wq;

/* per-net private data for this module */
static unsigned int l2tp_net_id;
struct l2tp_net {
	struct list_head l2tp_tunnel_list;
	spinlock_t l2tp_tunnel_list_lock;
	struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
	spinlock_t l2tp_session_hlist_lock;
};

static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
{
	return sk->sk_user_data;
}

static inline struct l2tp_net *l2tp_pernet(const struct net *net)
{
	BUG_ON(!net);

	return net_generic(net, l2tp_net_id);
}

/* Per-net session hash list for L2TPv3, keyed on session_id. */
static inline struct hlist_head *
l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
{
	return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)];
}

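/* Lookup a tunnel's socket, taking a reference on it. For a
 * userspace-owned socket (tunnel->fd >= 0) the reference comes from
 * sockfd_lookup(); for a kernel socket it comes from sock_hold().
 * Callers drop the reference with l2tp_tunnel_sock_put().
 */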
static struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)
{
	int err = 0;
	struct socket *sock = NULL;
	struct sock *sk = NULL;

	if (!tunnel)
		goto out;

	if (tunnel->fd >= 0) {
		/* Socket is owned by userspace; look it up by fd. */
		sock = sockfd_lookup(tunnel->fd, &err);
		if (sock)
			sk = sock->sk;
	} else {
		/* Socket is owned by kernelspace */
		sk = tunnel->sock;
		sock_hold(sk);
	}

out:
	return sk;
}

/* Drop a reference to a tunnel socket obtained via l2tp_tunnel_sock_lookup() */
static void l2tp_tunnel_sock_put(struct sock *sk)
{
	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);

	if (tunnel) {
		if (tunnel->fd >= 0) {
			/* Socket is owned by userspace */
			sockfd_put(sk->sk_socket);
		}
		sock_put(sk);
	}
	sock_put(sk);
}

/* Per-tunnel session hash list, keyed on session_id. */
static inline struct hlist_head *
l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
{
	return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
}

/* Lookup a tunnel by id. A new reference is held on the returned tunnel. */
struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
{
	const struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_tunnel *tunnel;

	rcu_read_lock_bh();
	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
		if (tunnel->tunnel_id == tunnel_id) {
			l2tp_tunnel_inc_refcount(tunnel);
			rcu_read_unlock_bh();

			return tunnel;
		}
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_get);

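/* Lookup a session by id. A new reference is held on the returned session.
 * If tunnel is NULL, the per-net L2TPv3 session hash is searched; otherwise
 * the tunnel's own session hash is used. If do_ref is set, the session's
 * ref() hook is also called.
 */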
struct l2tp_session *l2tp_session_get(const struct net *net,
				      struct l2tp_tunnel *tunnel,
				      u32 session_id, bool do_ref)
{
	struct hlist_head *session_list;
	struct l2tp_session *session;

	if (!tunnel) {
		struct l2tp_net *pn = l2tp_pernet(net);

		session_list = l2tp_session_id_hash_2(pn, session_id);

		rcu_read_lock_bh();
		hlist_for_each_entry_rcu(session, session_list, global_hlist) {
			if (session->session_id == session_id) {
				l2tp_session_inc_refcount(session);
				if (do_ref && session->ref)
					session->ref(session);
				rcu_read_unlock_bh();

				return session;
			}
		}
		rcu_read_unlock_bh();

		return NULL;
	}

	session_list = l2tp_session_id_hash(tunnel, session_id);
	read_lock_bh(&tunnel->hlist_lock);
	hlist_for_each_entry(session, session_list, hlist) {
		if (session->session_id == session_id) {
			l2tp_session_inc_refcount(session);
			if (do_ref && session->ref)
				session->ref(session);
			read_unlock_bh(&tunnel->hlist_lock);

			return session;
		}
	}
	read_unlock_bh(&tunnel->hlist_lock);

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_get);

struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
					  bool do_ref)
{
	int hash;
	struct l2tp_session *session;
	int count = 0;

	read_lock_bh(&tunnel->hlist_lock);
	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
		hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
			if (++count > nth) {
				l2tp_session_inc_refcount(session);
				if (do_ref && session->ref)
					session->ref(session);
				read_unlock_bh(&tunnel->hlist_lock);
				return session;
			}
		}
	}

	read_unlock_bh(&tunnel->hlist_lock);

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_get_nth);

/* Lookup a session by interface name.
 * This is very inefficient but is only used by management interfaces.
 */
struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
						const char *ifname,
						bool do_ref)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	int hash;
	struct l2tp_session *session;

	rcu_read_lock_bh();
	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
		hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
			if (!strcmp(session->ifname, ifname)) {
				l2tp_session_inc_refcount(session);
				if (do_ref && session->ref)
					session->ref(session);
				rcu_read_unlock_bh();

				return session;
			}
		}
	}

	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);

/* Add a session to its tunnel's hash list (and, for L2TPv3, to the per-net
 * session hash), failing with -EEXIST if the session_id is already in use.
 */
static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel,
				      struct l2tp_session *session)
{
	struct l2tp_session *session_walk;
	struct hlist_head *g_head;
	struct hlist_head *head;
	struct l2tp_net *pn;

	head = l2tp_session_id_hash(tunnel, session->session_id);

	write_lock_bh(&tunnel->hlist_lock);
	hlist_for_each_entry(session_walk, head, hlist)
		if (session_walk->session_id == session->session_id)
			goto exist;

	if (tunnel->version == L2TP_HDR_VER_3) {
		pn = l2tp_pernet(tunnel->l2tp_net);
		g_head = l2tp_session_id_hash_2(l2tp_pernet(tunnel->l2tp_net),
						session->session_id);

		spin_lock_bh(&pn->l2tp_session_hlist_lock);
		hlist_for_each_entry(session_walk, g_head, global_hlist)
			if (session_walk->session_id == session->session_id)
				goto exist_glob;

		hlist_add_head_rcu(&session->global_hlist, g_head);
		spin_unlock_bh(&pn->l2tp_session_hlist_lock);
	}

	hlist_add_head(&session->hlist, head);
	write_unlock_bh(&tunnel->hlist_lock);

	return 0;

exist_glob:
	spin_unlock_bh(&pn->l2tp_session_hlist_lock);
exist:
	write_unlock_bh(&tunnel->hlist_lock);

	return -EEXIST;
}

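/* Lookup a tunnel by id. Unlike l2tp_tunnel_get(), no reference is taken on
 * the returned tunnel, so it is only safe while the caller can otherwise
 * guarantee the tunnel's lifetime.
 */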
struct l2tp_tunnel *l2tp_tunnel_find(const struct net *net, u32 tunnel_id)
{
	struct l2tp_tunnel *tunnel;
	struct l2tp_net *pn = l2tp_pernet(net);

	rcu_read_lock_bh();
	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
		if (tunnel->tunnel_id == tunnel_id) {
			rcu_read_unlock_bh();
			return tunnel;
		}
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_find);

struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_tunnel *tunnel;
	int count = 0;

	rcu_read_lock_bh();
	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
		if (++count > nth) {
			rcu_read_unlock_bh();
			return tunnel;
		}
	}

	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_find_nth);

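/* Queue a data packet on the session's reorder queue, keeping the queue
 * ordered by ns. Only called for packets carrying sequence numbers.
 */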
static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
{
	struct sk_buff *skbp;
	struct sk_buff *tmp;
	u32 ns = L2TP_SKB_CB(skb)->ns;

	spin_lock_bh(&session->reorder_q.lock);
	skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
		if (L2TP_SKB_CB(skbp)->ns > ns) {
			__skb_queue_before(&session->reorder_q, skbp, skb);
			l2tp_dbg(session, L2TP_MSG_SEQ,
				 "%s: pkt %u, inserted before %u, reorder_q len=%d\n",
				 session->name, ns, L2TP_SKB_CB(skbp)->ns,
				 skb_queue_len(&session->reorder_q));
			atomic_long_inc(&session->stats.rx_oos_packets);
			goto out;
		}
	}

	__skb_queue_tail(&session->reorder_q, skb);

out:
	spin_unlock_bh(&session->reorder_q.lock);
}

/* Dequeue a single skb: update rx stats, advance the expected sequence
 * number and pass the packet to the session's recv_skb handler.
 */
static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	int length = L2TP_SKB_CB(skb)->length;

	/* We're about to requeue the skb, so return resources
	 * to its current owner (a socket receive buffer).
	 */
	skb_orphan(skb);

	atomic_long_inc(&tunnel->stats.rx_packets);
	atomic_long_add(length, &tunnel->stats.rx_bytes);
	atomic_long_inc(&session->stats.rx_packets);
	atomic_long_add(length, &session->stats.rx_bytes);

	if (L2TP_SKB_CB(skb)->has_seq) {
		/* Bump our Nr */
		session->nr++;
		session->nr &= session->nr_max;

		l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated nr to %u\n",
			 session->name, session->nr);
	}

	/* call private receive handler */
	if (session->recv_skb != NULL)
		(*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
	else
		kfree_skb(skb);

	if (session->deref)
		(*session->deref)(session);
}

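/* Dequeue in-order packets from the session's reorder queue and deliver
 * them. Packets that have sat in the queue past their expiry time are
 * discarded and counted as errors.
 */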
static void l2tp_recv_dequeue(struct l2tp_session *session)
{
	struct sk_buff *skb;
	struct sk_buff *tmp;

	/* If the pkt at the head of the queue has the nr that we
	 * expect to send up next, dequeue it and any other
	 * in-sequence packets behind it.
	 */
start:
	spin_lock_bh(&session->reorder_q.lock);
	skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
		if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
			atomic_long_inc(&session->stats.rx_seq_discards);
			atomic_long_inc(&session->stats.rx_errors);
			l2tp_dbg(session, L2TP_MSG_SEQ,
				 "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n",
				 session->name, L2TP_SKB_CB(skb)->ns,
				 L2TP_SKB_CB(skb)->length, session->nr,
				 skb_queue_len(&session->reorder_q));
			session->reorder_skip = 1;
			__skb_unlink(skb, &session->reorder_q);
			kfree_skb(skb);
			if (session->deref)
				(*session->deref)(session);
			continue;
		}

		if (L2TP_SKB_CB(skb)->has_seq) {
			if (session->reorder_skip) {
				l2tp_dbg(session, L2TP_MSG_SEQ,
					 "%s: advancing nr to next pkt: %u -> %u",
					 session->name, session->nr,
					 L2TP_SKB_CB(skb)->ns);
				session->reorder_skip = 0;
				session->nr = L2TP_SKB_CB(skb)->ns;
			}
			if (L2TP_SKB_CB(skb)->ns != session->nr) {
				l2tp_dbg(session, L2TP_MSG_SEQ,
					 "%s: holding oos pkt %u len %d, waiting for %u, reorder_q_len=%d\n",
					 session->name, L2TP_SKB_CB(skb)->ns,
					 L2TP_SKB_CB(skb)->length, session->nr,
					 skb_queue_len(&session->reorder_q));
				goto out;
			}
		}
		__skb_unlink(skb, &session->reorder_q);

		/* Process the skb. We release the queue lock while we
		 * do so to let other contexts process the queue.
		 */
		spin_unlock_bh(&session->reorder_q.lock);
		l2tp_recv_dequeue_skb(session, skb);
		goto start;
	}

out:
	spin_unlock_bh(&session->reorder_q.lock);
}

/* Check whether nr falls inside the session's receive window, taking
 * sequence number wrap-around into account.
 */
static int l2tp_seq_check_rx_window(struct l2tp_session *session, u32 nr)
{
	u32 nws;

	if (nr >= session->nr)
		nws = nr - session->nr;
	else
		nws = (session->nr_max + 1) - (session->nr - nr);

	return nws < session->nr_window_size;
}

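/* Handle a received data packet that carries sequence numbers: queue it for
 * reordering, accept it if in sequence, or discard it if it is out of
 * sequence and reordering is disabled. Returns 0 if the packet was queued,
 * 1 if it should be discarded.
 */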
static int l2tp_recv_data_seq(struct l2tp_session *session, struct sk_buff *skb)
{
	if (!l2tp_seq_check_rx_window(session, L2TP_SKB_CB(skb)->ns)) {
		/* Packet sequence number is outside allowed window.
		 * Discard it.
		 */
		l2tp_dbg(session, L2TP_MSG_SEQ,
			 "%s: pkt %u len %d discarded, outside window, nr=%u\n",
			 session->name, L2TP_SKB_CB(skb)->ns,
			 L2TP_SKB_CB(skb)->length, session->nr);
		goto discard;
	}

	if (session->reorder_timeout != 0) {
		/* Packet reordering enabled. Add skb to session's
		 * reorder queue, in order of ns.
		 */
		l2tp_recv_queue_skb(session, skb);
		goto out;
	}

	/* Packet reordering disabled. Discard out-of-sequence packets, while
	 * tracking how many consecutive bad-ns packets we have seen so that
	 * we can resync if the peer restarts its sequence numbers.
	 */
	if (L2TP_SKB_CB(skb)->ns == session->nr) {
		skb_queue_tail(&session->reorder_q, skb);
	} else {
		u32 nr_oos = L2TP_SKB_CB(skb)->ns;
		u32 nr_next = (session->nr_oos + 1) & session->nr_max;

		if (nr_oos == nr_next)
			session->nr_oos_count++;
		else
			session->nr_oos_count = 0;

		session->nr_oos = nr_oos;
		if (session->nr_oos_count > session->nr_oos_count_max) {
			session->reorder_skip = 1;
			l2tp_dbg(session, L2TP_MSG_SEQ,
				 "%s: %d oos packets received. Resetting sequence numbers\n",
				 session->name, session->nr_oos_count);
		}
		if (!session->reorder_skip) {
			atomic_long_inc(&session->stats.rx_seq_discards);
			l2tp_dbg(session, L2TP_MSG_SEQ,
				 "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n",
				 session->name, L2TP_SKB_CB(skb)->ns,
				 L2TP_SKB_CB(skb)->length, session->nr,
				 skb_queue_len(&session->reorder_q));
			goto discard;
		}
		skb_queue_tail(&session->reorder_q, skb);
	}

out:
	return 0;

discard:
	return 1;
}

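/* Core receive path, shared by the UDP and L2TPIP encapsulations.
 *
 * Verifies the optional cookie, parses the sequence number fields of the
 * L2TPv2 header or the L2TPv3 default L2-specific sublayer, then either
 * queues the packet for reordering or delivers it. On any error the packet
 * is counted against rx_errors and dropped.
 *
 * The caller has already checked the L2TP header version and looked up the
 * session; optr points at the start of the L2TP header and ptr just past
 * the session id field.
 */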
void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
		      unsigned char *ptr, unsigned char *optr, u16 hdrflags,
		      int length, int (*payload_hook)(struct sk_buff *skb))
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	int offset;
	u32 ns, nr;

	/* Parse and check optional cookie */
	if (session->peer_cookie_len > 0) {
		if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
			l2tp_info(tunnel, L2TP_MSG_DATA,
				  "%s: cookie mismatch (%u/%u). Discarding.\n",
				  tunnel->name, tunnel->tunnel_id,
				  session->session_id);
			atomic_long_inc(&session->stats.rx_cookie_discards);
			goto discard;
		}
		ptr += session->peer_cookie_len;
	}

	/* Handle the optional sequence numbers. Sequence numbers are
	 * in different places for L2TPv2 and L2TPv3.
	 */
	ns = nr = 0;
	L2TP_SKB_CB(skb)->has_seq = 0;
	if (tunnel->version == L2TP_HDR_VER_2) {
		if (hdrflags & L2TP_HDRFLAG_S) {
			ns = ntohs(*(__be16 *) ptr);
			ptr += 2;
			nr = ntohs(*(__be16 *) ptr);
			ptr += 2;

			/* Store L2TP info in the skb */
			L2TP_SKB_CB(skb)->ns = ns;
			L2TP_SKB_CB(skb)->has_seq = 1;

			l2tp_dbg(session, L2TP_MSG_SEQ,
				 "%s: recv data ns=%u, nr=%u, session nr=%u\n",
				 session->name, ns, nr, session->nr);
		}
	} else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
		u32 l2h = ntohl(*(__be32 *) ptr);

		if (l2h & 0x40000000) {
			ns = l2h & 0x00ffffff;

			/* Store L2TP info in the skb */
			L2TP_SKB_CB(skb)->ns = ns;
			L2TP_SKB_CB(skb)->has_seq = 1;

			l2tp_dbg(session, L2TP_MSG_SEQ,
				 "%s: recv data ns=%u, session nr=%u\n",
				 session->name, ns, session->nr);
		}
	}

	/* Advance past L2-specific header, if present */
	ptr += session->l2specific_len;

	if (L2TP_SKB_CB(skb)->has_seq) {
		/* Received a packet with sequence numbers. If we're the LAC
		 * and not already sending sequence numbers, the LNS has
		 * requested them, so turn them on.
		 */
		if ((!session->lns_mode) && (!session->send_seq)) {
			l2tp_info(session, L2TP_MSG_SEQ,
				  "%s: requested to enable seq numbers by LNS\n",
				  session->name);
			session->send_seq = 1;
			l2tp_session_set_header_len(session, tunnel->version);
		}
	} else {
		/* No sequence numbers.
		 * If user has configured mandatory sequence numbers, discard.
		 */
		if (session->recv_seq) {
			l2tp_warn(session, L2TP_MSG_SEQ,
				  "%s: recv data has no seq numbers when required. Discarding.\n",
				  session->name);
			atomic_long_inc(&session->stats.rx_seq_discards);
			goto discard;
		}

		/* If we're the LAC and the LNS has stopped sending sequence
		 * numbers, stop sending them too. If we're the LNS and are
		 * sending sequence numbers but the peer isn't, discard.
		 */
		if ((!session->lns_mode) && (session->send_seq)) {
			l2tp_info(session, L2TP_MSG_SEQ,
				  "%s: requested to disable seq numbers by LNS\n",
				  session->name);
			session->send_seq = 0;
			l2tp_session_set_header_len(session, tunnel->version);
		} else if (session->send_seq) {
			l2tp_warn(session, L2TP_MSG_SEQ,
				  "%s: recv data has no seq numbers when required. Discarding.\n",
				  session->name);
			atomic_long_inc(&session->stats.rx_seq_discards);
			goto discard;
		}
	}

	/* If the data offset bit is set (L2TPv2 only), skip over the offset
	 * pad. For L2TPv3, apply the locally configured offset, if any.
	 */
	if (tunnel->version == L2TP_HDR_VER_2) {
		if (hdrflags & L2TP_HDRFLAG_O) {
			offset = ntohs(*(__be16 *)ptr);
			ptr += 2 + offset;
		}
	} else
		ptr += session->offset;

	offset = ptr - optr;
	if (!pskb_may_pull(skb, offset))
		goto discard;

	__skb_pull(skb, offset);

	/* If caller wants to process the payload before we queue the
	 * packet, do so now.
	 */
	if (payload_hook)
		if ((*payload_hook)(skb))
			goto discard;

	/* Prepare skb for adding to the session's reorder_q. Hold
	 * packets for max reorder_timeout or 1 second if not
	 * reordering.
	 */
	L2TP_SKB_CB(skb)->length = length;
	L2TP_SKB_CB(skb)->expires = jiffies +
		(session->reorder_timeout ? session->reorder_timeout : HZ);

	/* Add packet to the session's receive queue. Reordering is done
	 * here, if enabled.
	 */
	if (L2TP_SKB_CB(skb)->has_seq) {
		if (l2tp_recv_data_seq(session, skb))
			goto discard;
	} else {
		/* No sequence numbers. Add the skb to the tail of the
		 * reorder queue. This ensures that it will be
		 * delivered after all previous sequenced skbs.
		 */
		skb_queue_tail(&session->reorder_q, skb);
	}

	/* Try to dequeue as many skbs from reorder_q as we can. */
	l2tp_recv_dequeue(session);

	return;

discard:
	atomic_long_inc(&session->stats.rx_errors);
	kfree_skb(skb);

	if (session->deref)
		(*session->deref)(session);
}
EXPORT_SYMBOL(l2tp_recv_common);

/* Drop skbs from the session's reorder_q */
int l2tp_session_queue_purge(struct l2tp_session *session)
{
	struct sk_buff *skb = NULL;

	BUG_ON(!session);
	BUG_ON(session->magic != L2TP_SESSION_MAGIC);

	while ((skb = skb_dequeue(&session->reorder_q))) {
		atomic_long_inc(&session->stats.rx_errors);
		kfree_skb(skb);
		if (session->deref)
			(*session->deref)(session);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(l2tp_session_queue_purge);

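/* UDP encapsulation receive handler, called from l2tp_udp_encap_recv().
 * Parses the L2TP header, locates the session and hands the packet to
 * l2tp_recv_common(). Returns 0 if the packet was consumed, 1 if it should
 * be passed up to the UDP socket (e.g. control packets or packets for
 * unknown sessions).
 */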
static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
			      int (*payload_hook)(struct sk_buff *skb))
{
	struct l2tp_session *session = NULL;
	unsigned char *ptr, *optr;
	u16 hdrflags;
	u32 tunnel_id, session_id;
	u16 version;
	int length;

	/* Strip the UDP header; the UDP layer has already verified the
	 * checksum and packet length.
	 */
	__skb_pull(skb, sizeof(struct udphdr));

	/* Short packet? */
	if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
		l2tp_info(tunnel, L2TP_MSG_DATA,
			  "%s: recv short packet (len=%d)\n",
			  tunnel->name, skb->len);
		goto error;
	}

	/* Trace packet contents, if enabled */
	if (tunnel->debug & L2TP_MSG_DATA) {
		length = min(32u, skb->len);
		if (!pskb_may_pull(skb, length))
			goto error;

		pr_debug("%s: recv\n", tunnel->name);
		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
	}

	/* Point to L2TP header */
	optr = ptr = skb->data;

	/* Get L2TP header flags */
	hdrflags = ntohs(*(__be16 *) ptr);

	/* Check protocol version */
	version = hdrflags & L2TP_HDR_VER_MASK;
	if (version != tunnel->version) {
		l2tp_info(tunnel, L2TP_MSG_DATA,
			  "%s: recv protocol version mismatch: got %d expected %d\n",
			  tunnel->name, version, tunnel->version);
		goto error;
	}

	/* Get length of L2TP packet */
	length = skb->len;

	/* If type is control packet, it is handled by userspace. */
	if (hdrflags & L2TP_HDRFLAG_T) {
		l2tp_dbg(tunnel, L2TP_MSG_DATA,
			 "%s: recv control packet, len=%d\n",
			 tunnel->name, length);
		goto error;
	}

	/* Skip flags */
	ptr += 2;

	if (tunnel->version == L2TP_HDR_VER_2) {
		/* If length is present, skip it */
		if (hdrflags & L2TP_HDRFLAG_L)
			ptr += 2;

		/* Extract tunnel and session ID */
		tunnel_id = ntohs(*(__be16 *) ptr);
		ptr += 2;
		session_id = ntohs(*(__be16 *) ptr);
		ptr += 2;
	} else {
		ptr += 2;	/* skip reserved bits */
		tunnel_id = tunnel->tunnel_id;
		session_id = ntohl(*(__be32 *) ptr);
		ptr += 4;
	}

	/* Find the session context */
	session = l2tp_session_get(tunnel->l2tp_net, tunnel, session_id, true);
	if (!session || !session->recv_skb) {
		if (session) {
			if (session->deref)
				session->deref(session);
			l2tp_session_dec_refcount(session);
		}

		/* Not found? Pass to userspace to deal with */
		l2tp_info(tunnel, L2TP_MSG_DATA,
			  "%s: no session found (%u/%u). Passing up.\n",
			  tunnel->name, tunnel_id, session_id);
		goto error;
	}

	l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);
	l2tp_session_dec_refcount(session);

	return 0;

error:
	/* Put UDP header back */
	__skb_push(skb, sizeof(struct udphdr));

	return 1;
}

/* UDP encapsulation receive handler. See net/ipv4/udp.c.
 * Returns 0 if the packet was consumed, 1 if the skb should be passed up
 * to the UDP socket.
 */
int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct l2tp_tunnel *tunnel;

	tunnel = l2tp_sock_to_tunnel(sk);
	if (tunnel == NULL)
		goto pass_up;

	l2tp_dbg(tunnel, L2TP_MSG_DATA, "%s: received %d bytes\n",
		 tunnel->name, skb->len);

	if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook))
		goto pass_up_put;

	sock_put(sk);
	return 0;

pass_up_put:
	sock_put(sk);
pass_up:
	return 1;
}
EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);

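/* Build an L2TPv2 data packet header in the headroom at buf, using the
 * peer's tunnel and session ids. ns is included and advanced when send_seq
 * is set. Returns the number of bytes written.
 */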
static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	__be16 *bufp = buf;
	__be16 *optr = buf;
	u16 flags = L2TP_HDR_VER_2;
	u32 tunnel_id = tunnel->peer_tunnel_id;
	u32 session_id = session->peer_session_id;

	if (session->send_seq)
		flags |= L2TP_HDRFLAG_S;

	/* Setup L2TP header. */
	*bufp++ = htons(flags);
	*bufp++ = htons(tunnel_id);
	*bufp++ = htons(session_id);
	if (session->send_seq) {
		*bufp++ = htons(session->ns);
		*bufp++ = 0;
		session->ns++;
		session->ns &= 0xffff;
		l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated ns to %u\n",
			 session->name, session->ns);
	}

	return bufp - optr;
}

static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	char *bufp = buf;
	char *optr = bufp;

	/* Setup L2TP header. The header differs slightly for UDP and
	 * IP encapsulations. For UDP, there is 4 bytes of flags.
	 */
	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
		u16 flags = L2TP_HDR_VER_3;

		*((__be16 *) bufp) = htons(flags);
		bufp += 2;
		*((__be16 *) bufp) = 0;
		bufp += 2;
	}

	*((__be32 *) bufp) = htonl(session->peer_session_id);
	bufp += 4;
	if (session->cookie_len) {
		memcpy(bufp, &session->cookie[0], session->cookie_len);
		bufp += session->cookie_len;
	}
	if (session->l2specific_len) {
		if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
			u32 l2h = 0;

			if (session->send_seq) {
				l2h = 0x40000000 | session->ns;
				session->ns++;
				session->ns &= 0xffffff;
				l2tp_dbg(session, L2TP_MSG_SEQ,
					 "%s: updated ns to %u\n",
					 session->name, session->ns);
			}

			*((__be32 *) bufp) = htonl(l2h);
		}
		bufp += session->l2specific_len;
	}
	if (session->offset)
		bufp += session->offset;

	return bufp - optr;
}

static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
			  struct flowi *fl, size_t data_len)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	unsigned int len = skb->len;
	int error;

	/* Debug */
	if (session->send_seq)
		l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %zd bytes, ns=%u\n",
			 session->name, data_len, session->ns - 1);
	else
		l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %zd bytes\n",
			 session->name, data_len);

	if (session->debug & L2TP_MSG_DATA) {
		int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
		unsigned char *datap = skb->data + uhlen;

		pr_debug("%s: xmit\n", session->name);
		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
				     datap, min_t(size_t, 32, len - uhlen));
	}

	/* Queue the packet to IP for output */
	skb->ignore_df = 1;
#if IS_ENABLED(CONFIG_IPV6)
	if (tunnel->sock->sk_family == PF_INET6 && !tunnel->v4mapped)
		error = inet6_csk_xmit(tunnel->sock, skb, NULL);
	else
#endif
		error = ip_queue_xmit(tunnel->sock, skb, fl);

	/* Update stats */
	if (error >= 0) {
		atomic_long_inc(&tunnel->stats.tx_packets);
		atomic_long_add(len, &tunnel->stats.tx_bytes);
		atomic_long_inc(&session->stats.tx_packets);
		atomic_long_add(len, &session->stats.tx_bytes);
	} else {
		atomic_long_inc(&tunnel->stats.tx_errors);
		atomic_long_inc(&session->stats.tx_errors);
	}

	return 0;
}

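/* Transmit a session data frame on the tunnel socket. Headroom for the
 * IP/UDP/L2TP headers is ensured with skb_cow_head(), the session header is
 * built, UDP encapsulation (including checksum) is applied when configured,
 * and the packet is sent using the tunnel socket's cached route.
 */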
int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len)
{
	int data_len = skb->len;
	struct l2tp_tunnel *tunnel = session->tunnel;
	struct sock *sk = tunnel->sock;
	struct flowi *fl;
	struct udphdr *uh;
	struct inet_sock *inet;
	int headroom;
	int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
	int udp_len;
	int ret = NET_XMIT_SUCCESS;

	/* Check that there's enough headroom in the skb to insert IP,
	 * UDP and L2TP headers. If not enough, expand it to make room.
	 */
	headroom = NET_SKB_PAD + sizeof(struct iphdr) +
		uhlen + hdr_len;
	if (skb_cow_head(skb, headroom)) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	/* Setup L2TP header */
	session->build_header(session, __skb_push(skb, hdr_len));

	/* Reset skb netfilter state */
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	nf_reset(skb);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		kfree_skb(skb);
		ret = NET_XMIT_DROP;
		goto out_unlock;
	}

	/* Get routing info from the tunnel socket */
	skb_dst_drop(skb);
	skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0)));

	inet = inet_sk(sk);
	fl = &inet->cork.fl;
	switch (tunnel->encap) {
	case L2TP_ENCAPTYPE_UDP:
		/* Setup UDP header */
		__skb_push(skb, sizeof(*uh));
		skb_reset_transport_header(skb);
		uh = udp_hdr(skb);
		uh->source = inet->inet_sport;
		uh->dest = inet->inet_dport;
		udp_len = uhlen + hdr_len + data_len;
		uh->len = htons(udp_len);

		/* Calculate UDP checksum if configured to do so */
#if IS_ENABLED(CONFIG_IPV6)
		if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
			udp6_set_csum(udp_get_no_check6_tx(sk),
				      skb, &inet6_sk(sk)->saddr,
				      &sk->sk_v6_daddr, udp_len);
		else
#endif
			udp_set_csum(sk->sk_no_check_tx, skb, inet->inet_saddr,
				     inet->inet_daddr, udp_len);
		break;

	case L2TP_ENCAPTYPE_IP:
		break;
	}

	l2tp_xmit_core(session, skb, fl, data_len);
out_unlock:
	bh_unlock_sock(sk);

	return ret;
}
EXPORT_SYMBOL_GPL(l2tp_xmit_skb);

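/* Destructor for the tunnel's UDP/IP socket. Detaches the tunnel from the
 * socket, removes it from the per-net tunnel list, closes all of its
 * sessions and drops the tunnel reference, then chains to the socket's
 * original destructor.
 */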
static void l2tp_tunnel_destruct(struct sock *sk)
{
	struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
	struct l2tp_net *pn;

	if (tunnel == NULL)
		goto end;

	l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name);

	/* Disable udp encapsulation */
	switch (tunnel->encap) {
	case L2TP_ENCAPTYPE_UDP:
		/* No longer an encapsulation socket. See net/ipv4/udp.c */
		(udp_sk(sk))->encap_type = 0;
		(udp_sk(sk))->encap_rcv = NULL;
		(udp_sk(sk))->encap_destroy = NULL;
		break;
	case L2TP_ENCAPTYPE_IP:
		break;
	}

	/* Remove hooks into tunnel socket */
	sk->sk_destruct = tunnel->old_sk_destruct;
	sk->sk_user_data = NULL;
	tunnel->sock = NULL;

	/* Remove the tunnel struct from the tunnel list */
	pn = l2tp_pernet(tunnel->l2tp_net);
	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
	list_del_rcu(&tunnel->list);
	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
	atomic_dec(&l2tp_tunnel_count);

	l2tp_tunnel_closeall(tunnel);
	l2tp_tunnel_dec_refcount(tunnel);

	/* Call the original destructor */
	if (sk->sk_destruct)
		(*sk->sk_destruct)(sk);
end:
	return;
}

/* When the tunnel is closed, all the attached sessions need to go too. */
void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
{
	int hash;
	struct hlist_node *walk;
	struct hlist_node *tmp;
	struct l2tp_session *session;

	BUG_ON(tunnel == NULL);

	l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing all sessions...\n",
		  tunnel->name);

	write_lock_bh(&tunnel->hlist_lock);
	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
again:
		hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
			session = hlist_entry(walk, struct l2tp_session, hlist);

			l2tp_info(session, L2TP_MSG_CONTROL,
				  "%s: closing session\n", session->name);

			hlist_del_init(&session->hlist);

			if (session->ref != NULL)
				(*session->ref)(session);

			write_unlock_bh(&tunnel->hlist_lock);

			__l2tp_session_unhash(session);
			l2tp_session_queue_purge(session);

			if (session->session_close != NULL)
				(*session->session_close)(session);

			if (session->deref != NULL)
				(*session->deref)(session);

			l2tp_session_dec_refcount(session);

			write_lock_bh(&tunnel->hlist_lock);

			/* Now restart from the beginning of this hash
			 * chain. We always remove a session from the
			 * list so we are guaranteed to make forward
			 * progress.
			 */
			goto again;
		}
	}
	write_unlock_bh(&tunnel->hlist_lock);
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);

/* Tunnel socket destroy hook for UDP encapsulation */
static void l2tp_udp_encap_destroy(struct sock *sk)
{
	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);

	if (tunnel) {
		l2tp_tunnel_closeall(tunnel);
		sock_put(sk);
	}
}

/* Workqueue tunnel deletion function */
static void l2tp_tunnel_del_work(struct work_struct *work)
{
	struct l2tp_tunnel *tunnel = NULL;
	struct socket *sock = NULL;
	struct sock *sk = NULL;

	tunnel = container_of(work, struct l2tp_tunnel, del_work);

	l2tp_tunnel_closeall(tunnel);

	sk = l2tp_tunnel_sock_lookup(tunnel);
	if (!sk)
		goto out;

	sock = sk->sk_socket;

	/* If the tunnel socket was created by userspace, then go through the
	 * inet layer to shut the socket down, and let userspace close it.
	 * Otherwise, if we created the socket directly within the kernel, use
	 * the sk API to release it here.
	 * In either case the tunnel resources are freed in the socket
	 * destructor when the tunnel socket goes away.
	 */
	if (tunnel->fd >= 0) {
		if (sock)
			inet_shutdown(sock, 2);
	} else {
		if (sock) {
			kernel_sock_shutdown(sock, SHUT_RDWR);
			sock_release(sock);
		}
	}

	l2tp_tunnel_sock_put(sk);
out:
	l2tp_tunnel_dec_refcount(tunnel);
}

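/* Create a kernel socket for an unmanaged tunnel (one not set up by
 * userspace). Depending on cfg->encap this is either a connected UDP socket
 * or an L2TPIP socket, over IPv4 or IPv6. On failure, any partially created
 * socket is shut down and released.
 */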
static int l2tp_tunnel_sock_create(struct net *net,
				   u32 tunnel_id,
				   u32 peer_tunnel_id,
				   struct l2tp_tunnel_cfg *cfg,
				   struct socket **sockp)
{
	int err = -EINVAL;
	struct socket *sock = NULL;
	struct udp_port_cfg udp_conf;

	switch (cfg->encap) {
	case L2TP_ENCAPTYPE_UDP:
		memset(&udp_conf, 0, sizeof(udp_conf));

#if IS_ENABLED(CONFIG_IPV6)
		if (cfg->local_ip6 && cfg->peer_ip6) {
			udp_conf.family = AF_INET6;
			memcpy(&udp_conf.local_ip6, cfg->local_ip6,
			       sizeof(udp_conf.local_ip6));
			memcpy(&udp_conf.peer_ip6, cfg->peer_ip6,
			       sizeof(udp_conf.peer_ip6));
			udp_conf.use_udp6_tx_checksums =
				!cfg->udp6_zero_tx_checksums;
			udp_conf.use_udp6_rx_checksums =
				!cfg->udp6_zero_rx_checksums;
		} else
#endif
		{
			udp_conf.family = AF_INET;
			udp_conf.local_ip = cfg->local_ip;
			udp_conf.peer_ip = cfg->peer_ip;
			udp_conf.use_udp_checksums = cfg->use_udp_checksums;
		}

		udp_conf.local_udp_port = htons(cfg->local_udp_port);
		udp_conf.peer_udp_port = htons(cfg->peer_udp_port);

		err = udp_sock_create(net, &udp_conf, &sock);
		if (err < 0)
			goto out;

		break;

	case L2TP_ENCAPTYPE_IP:
#if IS_ENABLED(CONFIG_IPV6)
		if (cfg->local_ip6 && cfg->peer_ip6) {
			struct sockaddr_l2tpip6 ip6_addr = {0};

			err = sock_create_kern(net, AF_INET6, SOCK_DGRAM,
					       IPPROTO_L2TP, &sock);
			if (err < 0)
				goto out;

			ip6_addr.l2tp_family = AF_INET6;
			memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
			       sizeof(ip6_addr.l2tp_addr));
			ip6_addr.l2tp_conn_id = tunnel_id;
			err = kernel_bind(sock, (struct sockaddr *) &ip6_addr,
					  sizeof(ip6_addr));
			if (err < 0)
				goto out;

			ip6_addr.l2tp_family = AF_INET6;
			memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6,
			       sizeof(ip6_addr.l2tp_addr));
			ip6_addr.l2tp_conn_id = peer_tunnel_id;
			err = kernel_connect(sock,
					     (struct sockaddr *) &ip6_addr,
					     sizeof(ip6_addr), 0);
			if (err < 0)
				goto out;
		} else
#endif
		{
			struct sockaddr_l2tpip ip_addr = {0};

			err = sock_create_kern(net, AF_INET, SOCK_DGRAM,
					       IPPROTO_L2TP, &sock);
			if (err < 0)
				goto out;

			ip_addr.l2tp_family = AF_INET;
			ip_addr.l2tp_addr = cfg->local_ip;
			ip_addr.l2tp_conn_id = tunnel_id;
			err = kernel_bind(sock, (struct sockaddr *) &ip_addr,
					  sizeof(ip_addr));
			if (err < 0)
				goto out;

			ip_addr.l2tp_family = AF_INET;
			ip_addr.l2tp_addr = cfg->peer_ip;
			ip_addr.l2tp_conn_id = peer_tunnel_id;
			err = kernel_connect(sock, (struct sockaddr *) &ip_addr,
					     sizeof(ip_addr), 0);
			if (err < 0)
				goto out;
		}
		break;

	default:
		goto out;
	}

out:
	*sockp = sock;
	if ((err < 0) && sock) {
		kernel_sock_shutdown(sock, SHUT_RDWR);
		sock_release(sock);
		*sockp = NULL;
	}

	return err;
}

static struct lock_class_key l2tp_socket_class;

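/* Create a new tunnel context. If fd is negative, a kernel socket is created
 * from cfg; otherwise the userspace socket referenced by fd is adopted after
 * checking its netns and protocol. The tunnel is hooked into the socket (as
 * UDP encap or via sk_user_data), given an initial reference and added to
 * the per-net tunnel list.
 */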
int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
		       u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
		       struct l2tp_tunnel **tunnelp)
{
	struct l2tp_tunnel *tunnel = NULL;
	int err;
	struct socket *sock = NULL;
	struct sock *sk = NULL;
	struct l2tp_net *pn;
	enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;

	/* Get the tunnel socket from the fd, which was opened by
	 * the userspace L2TP daemon. If not specified, create a
	 * kernel socket.
	 */
	if (fd < 0) {
		err = l2tp_tunnel_sock_create(net, tunnel_id, peer_tunnel_id,
					      cfg, &sock);
		if (err < 0)
			goto err;
	} else {
		sock = sockfd_lookup(fd, &err);
		if (!sock) {
			pr_err("tunl %u: sockfd_lookup(fd=%d) returned %d\n",
			       tunnel_id, fd, err);
			err = -EBADF;
			goto err;
		}

		/* Reject namespace mismatches */
		if (!net_eq(sock_net(sock->sk), net)) {
			pr_err("tunl %u: netns mismatch\n", tunnel_id);
			err = -EINVAL;
			goto err;
		}
	}

	sk = sock->sk;

	if (cfg != NULL)
		encap = cfg->encap;

	/* Quick sanity checks */
	switch (encap) {
	case L2TP_ENCAPTYPE_UDP:
		err = -EPROTONOSUPPORT;
		if (sk->sk_protocol != IPPROTO_UDP) {
			pr_err("tunl %u: fd %d wrong protocol, got %d, expected %d\n",
			       tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
			goto err;
		}
		break;
	case L2TP_ENCAPTYPE_IP:
		err = -EPROTONOSUPPORT;
		if (sk->sk_protocol != IPPROTO_L2TP) {
			pr_err("tunl %u: fd %d wrong protocol, got %d, expected %d\n",
			       tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
			goto err;
		}
		break;
	}

	/* Check if this socket has already been prepped */
	tunnel = l2tp_tunnel(sk);
	if (tunnel != NULL) {
		/* This socket has already been prepped */
		err = -EBUSY;
		goto err;
	}

	tunnel = kzalloc(sizeof(struct l2tp_tunnel), GFP_KERNEL);
	if (tunnel == NULL) {
		err = -ENOMEM;
		goto err;
	}

	tunnel->version = version;
	tunnel->tunnel_id = tunnel_id;
	tunnel->peer_tunnel_id = peer_tunnel_id;
	tunnel->debug = L2TP_DEFAULT_DEBUG_FLAGS;

	tunnel->magic = L2TP_TUNNEL_MAGIC;
	sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
	rwlock_init(&tunnel->hlist_lock);

	/* The net we belong to */
	tunnel->l2tp_net = net;
	pn = l2tp_pernet(net);

	if (cfg != NULL)
		tunnel->debug = cfg->debug;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == PF_INET6) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		if (ipv6_addr_v4mapped(&np->saddr) &&
		    ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
			struct inet_sock *inet = inet_sk(sk);

			tunnel->v4mapped = true;
			inet->inet_saddr = np->saddr.s6_addr32[3];
			inet->inet_rcv_saddr = sk->sk_v6_rcv_saddr.s6_addr32[3];
			inet->inet_daddr = sk->sk_v6_daddr.s6_addr32[3];
		} else {
			tunnel->v4mapped = false;
		}
	}
#endif

	/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
	tunnel->encap = encap;
	if (encap == L2TP_ENCAPTYPE_UDP) {
		struct udp_tunnel_sock_cfg udp_cfg = { };

		udp_cfg.sk_user_data = tunnel;
		udp_cfg.encap_type = UDP_ENCAP_L2TPINUDP;
		udp_cfg.encap_rcv = l2tp_udp_encap_recv;
		udp_cfg.encap_destroy = l2tp_udp_encap_destroy;

		setup_udp_tunnel_sock(net, sock, &udp_cfg);
	} else {
		sk->sk_user_data = tunnel;
	}

	/* Hook on the tunnel socket destructor so that we can cleanup
	 * if the tunnel socket goes away.
	 */
	tunnel->old_sk_destruct = sk->sk_destruct;
	sk->sk_destruct = &l2tp_tunnel_destruct;
	tunnel->sock = sk;
	tunnel->fd = fd;
	lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class,
				   "l2tp_sock");

	sk->sk_allocation = GFP_ATOMIC;

	/* Init delete workqueue struct */
	INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);

	/* Add tunnel to our list */
	INIT_LIST_HEAD(&tunnel->list);
	atomic_inc(&l2tp_tunnel_count);

	/* Bump the reference count. The tunnel context is deleted
	 * only when this drops to zero.
	 */
	refcount_set(&tunnel->ref_count, 1);
	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
	list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);

	err = 0;
err:
	if (tunnelp)
		*tunnelp = tunnel;

	/* If tunnel's socket was created by the kernel, it doesn't
	 * have a file.
	 */
	if (sock && sock->file)
		sockfd_put(sock);

	return err;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_create);

/* Queue tunnel deletion work, holding a tunnel reference until it runs. */
int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
{
	l2tp_tunnel_inc_refcount(tunnel);
	if (!queue_work(l2tp_wq, &tunnel->del_work)) {
		l2tp_tunnel_dec_refcount(tunnel);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);

/* Really kill the session. */
void l2tp_session_free(struct l2tp_session *session)
{
	struct l2tp_tunnel *tunnel = session->tunnel;

	BUG_ON(refcount_read(&session->ref_count) != 0);

	if (tunnel) {
		BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
		if (session->session_id != 0)
			atomic_dec(&l2tp_session_count);
		sock_put(tunnel->sock);
		session->tunnel = NULL;
		l2tp_tunnel_dec_refcount(tunnel);
	}

	kfree(session);
}
EXPORT_SYMBOL_GPL(l2tp_session_free);

/* Remove an l2tp session from l2tp_core's session hash lists. */
void __l2tp_session_unhash(struct l2tp_session *session)
{
	struct l2tp_tunnel *tunnel = session->tunnel;

	if (tunnel) {
		/* Remove from the per-tunnel hash */
		write_lock_bh(&tunnel->hlist_lock);
		hlist_del_init(&session->hlist);
		write_unlock_bh(&tunnel->hlist_lock);

		/* For L2TPv3 we have a per-net hash: remove from there, too */
		if (tunnel->version != L2TP_HDR_VER_2) {
			struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);

			spin_lock_bh(&pn->l2tp_session_hlist_lock);
			hlist_del_init_rcu(&session->global_hlist);
			spin_unlock_bh(&pn->l2tp_session_hlist_lock);
			synchronize_rcu();
		}
	}
}
EXPORT_SYMBOL_GPL(__l2tp_session_unhash);

/* Unhash, purge and close a session, then drop its reference. */
int l2tp_session_delete(struct l2tp_session *session)
{
	if (session->ref)
		(*session->ref)(session);
	__l2tp_session_unhash(session);
	l2tp_session_queue_purge(session);
	if (session->session_close != NULL)
		(*session->session_close)(session);
	if (session->deref)
		(*session->deref)(session);
	l2tp_session_dec_refcount(session);
	return 0;
}
EXPORT_SYMBOL_GPL(l2tp_session_delete);

/* We come here whenever a session's send_seq, cookie_len or
 * l2specific_len parameters are set.
 */
void l2tp_session_set_header_len(struct l2tp_session *session, int version)
{
	if (version == L2TP_HDR_VER_2) {
		session->hdr_len = 6;
		if (session->send_seq)
			session->hdr_len += 4;
	} else {
		session->hdr_len = 4 + session->cookie_len +
				   session->l2specific_len + session->offset;
		if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
			session->hdr_len += 4;
	}
}
EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);

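/* Allocate and initialise a new session for a tunnel, including priv_size
 * bytes of private data for the caller. The session is hashed into the
 * tunnel (and, for L2TPv3, the per-net session table) and holds references
 * on the tunnel and its socket. Returns the session or an ERR_PTR() on
 * failure.
 */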
struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel,
					 u32 session_id, u32 peer_session_id,
					 struct l2tp_session_cfg *cfg)
{
	struct l2tp_session *session;
	int err;

	session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
	if (session != NULL) {
		session->magic = L2TP_SESSION_MAGIC;
		session->tunnel = tunnel;

		session->session_id = session_id;
		session->peer_session_id = peer_session_id;
		session->nr = 0;
		if (tunnel->version == L2TP_HDR_VER_2)
			session->nr_max = 0xffff;
		else
			session->nr_max = 0xffffff;
		session->nr_window_size = session->nr_max / 2;
		session->nr_oos_count_max = 4;

		/* Use NR of first received packet */
		session->reorder_skip = 1;

		sprintf(&session->name[0], "sess %u/%u",
			tunnel->tunnel_id, session->session_id);

		skb_queue_head_init(&session->reorder_q);

		INIT_HLIST_NODE(&session->hlist);
		INIT_HLIST_NODE(&session->global_hlist);

		/* Inherit debug options from tunnel */
		session->debug = tunnel->debug;

		if (cfg) {
			session->pwtype = cfg->pw_type;
			session->debug = cfg->debug;
			session->mtu = cfg->mtu;
			session->mru = cfg->mru;
			session->send_seq = cfg->send_seq;
			session->recv_seq = cfg->recv_seq;
			session->lns_mode = cfg->lns_mode;
			session->reorder_timeout = cfg->reorder_timeout;
			session->offset = cfg->offset;
			session->l2specific_type = cfg->l2specific_type;
			session->l2specific_len = cfg->l2specific_len;
			session->cookie_len = cfg->cookie_len;
			memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
			session->peer_cookie_len = cfg->peer_cookie_len;
			memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
		}

		if (tunnel->version == L2TP_HDR_VER_2)
			session->build_header = l2tp_build_l2tpv2_header;
		else
			session->build_header = l2tp_build_l2tpv3_header;

		l2tp_session_set_header_len(session, tunnel->version);

		refcount_set(&session->ref_count, 1);

		err = l2tp_session_add_to_tunnel(tunnel, session);
		if (err) {
			kfree(session);

			return ERR_PTR(err);
		}

		/* Bump the reference count. The session context is deleted
		 * only when this drops to zero.
		 */
		l2tp_tunnel_inc_refcount(tunnel);

		/* Ensure tunnel socket isn't deleted */
		sock_hold(tunnel->sock);

		/* Ignore management session in session count value */
		if (session->session_id != 0)
			atomic_inc(&l2tp_session_count);

		return session;
	}

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(l2tp_session_create);

/*****************************************************************************
 * Init and cleanup
 *****************************************************************************/

static __net_init int l2tp_init_net(struct net *net)
{
	struct l2tp_net *pn = net_generic(net, l2tp_net_id);
	int hash;

	INIT_LIST_HEAD(&pn->l2tp_tunnel_list);
	spin_lock_init(&pn->l2tp_tunnel_list_lock);

	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
		INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]);

	spin_lock_init(&pn->l2tp_session_hlist_lock);

	return 0;
}

static __net_exit void l2tp_exit_net(struct net *net)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_tunnel *tunnel = NULL;

	rcu_read_lock_bh();
	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
		(void)l2tp_tunnel_delete(tunnel);
	}
	rcu_read_unlock_bh();

	flush_workqueue(l2tp_wq);
	rcu_barrier();
}

static struct pernet_operations l2tp_net_ops = {
	.init = l2tp_init_net,
	.exit = l2tp_exit_net,
	.id   = &l2tp_net_id,
	.size = sizeof(struct l2tp_net),
};

static int __init l2tp_init(void)
{
	int rc = 0;

	rc = register_pernet_device(&l2tp_net_ops);
	if (rc)
		goto out;

	l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0);
	if (!l2tp_wq) {
		pr_err("alloc_workqueue failed\n");
		unregister_pernet_device(&l2tp_net_ops);
		rc = -ENOMEM;
		goto out;
	}

	pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);

out:
	return rc;
}

static void __exit l2tp_exit(void)
{
	unregister_pernet_device(&l2tp_net_ops);
	if (l2tp_wq) {
		destroy_workqueue(l2tp_wq);
		l2tp_wq = NULL;
	}
}

module_init(l2tp_init);
module_exit(l2tp_exit);

MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("L2TP core");
MODULE_LICENSE("GPL");
MODULE_VERSION(L2TP_DRV_VERSION);